core_validation.cpp revision 27e2917587adf1738c71851a128806f7de578cbe
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif

using namespace std;

// TODO : CB really needs its own class and files so this is just temp code until that happens
GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        // Make sure that no sets hold onto deleted CB binding
        for (auto set : lastBound[i].uniqueBoundSets) {
            set->RemoveBoundCommandBuffer(this);
        }
    }
}

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;
// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;
    unique_ptr<INSTANCE_STATE> instance_state;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues; // All queues under given device
    // Vector indices correspond to queueFamilyIndex
    vector<unique_ptr<VkQueueFamilyProperties>> queue_family_properties;
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;
    VkPhysicalDeviceFeatures physical_device_features;
    unique_ptr<PHYSICAL_DEVICE_STATE> physical_device_state;

    layer_data()
        : instance_state(nullptr), report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
          device_extensions(), device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{}, physical_device_features{},
          physical_device_state(nullptr){};
};
// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
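// Illustrative sketch (not part of the original file): layer entry points resolve
// their per-instance/per-device state from layer_data_map via the object's dispatch
// key. Assuming get_dispatch_key() and get_my_data_ptr() from the layer utility
// headers included above, the usual lookup idiom is:
//
//     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);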
static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
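// Illustrative sketch (hypothetical helper, not in the original file): range-based
// iteration over a module's instruction stream, enabled by shader_module's
// begin()/end() above. Each ++ advances by the instruction's own word count.
static spirv_inst_iter find_first_opcode_example(shader_module const *src, uint32_t wanted_opcode) {
    for (auto insn : *src) {
        if (insn.opcode() == wanted_opcode)
            return insn; // e.g. wanted_opcode == spv::OpEntryPoint
    }
    return src->end();
}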
// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return ImageViewCreateInfo ptr for specified imageView or else NULL
VkImageViewCreateInfo *getImageViewData(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view info ptr for specified bufferView or else NULL
VkBufferViewCreateInfo *getBufferViewInfo(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}
static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        if (img_node)
            return &img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node)
            return &buff_node->mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                            MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                                " used by %s. In this case, %s should have %s set during creation.",
                            ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skip_call;
}

// Helper function to validate usage flags for images
// For given image_node send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool validateImageUsageFlags(layer_data *dev_data, IMAGE_NODE const *image_node, VkFlags desired, VkBool32 strict,
                                    char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool validateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
                                     char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                "buffer", func_name, usage_string);
}
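// Illustrative example (commented out; a sketch, not original code): a strict
// check requires every desired bit to be present, so validating an image used as
// a blit source might look like the following, flagging images created without
// TRANSFER_SRC usage:
//
//     skip_call |= validateImageUsageFlags(dev_data, src_image_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
//                                          "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");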
// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = getImageNode(dev_data, image);
        if (image_node && !image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = getImageNode(dev_data, image);
        if (image_node) {
            image_node->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}
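// Illustrative sketch of the intended valid-bit flow (call sites live elsewhere in
// this layer; the node names here are hypothetical): a transfer command validates
// its source before the read and marks its destination valid after the write:
//
//     skip_call |= validate_memory_is_valid(dev_data, src_buff_node->mem, "vkCmdCopyBuffer()");
//     set_memory_valid(dev_data, dst_buff_node->mem, true);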
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given image node and command buffer node
static bool addCommandBufferBindingImage(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_NODE *img_node, const char *apiName) {
    bool skip_call = false;
    // Skip validation if this image was created through WSI
    if (img_node->mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, img_node->mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb_node->commandBuffer);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(img_node->mem);
        }
    }
    // Now update cb binding for image
    img_node->cb_bindings.insert(cb_node);
    return skip_call;
}

// Create binding link between given buffer node and command buffer node
static bool addCommandBufferBindingBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node,
                                          const char *apiName) {
    bool skip_call = false;

    // First update CB binding in MemObj mini CB list
    DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->mem);
    if (pMemInfo) {
        pMemInfo->commandBufferBindings.insert(cb_node->commandBuffer);
        // Now update CBInfo's Mem reference list
        cb_node->memObjs.insert(buff_node->mem);
    }
    // Now update cb binding for buffer
    buff_node->cb_bindings.insert(cb_node);

    return skip_call;
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}
// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skip_call = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if ((pMemObjInfo->commandBufferBindings.size()) != 0) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                            "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                            " references",
                            (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->commandBufferBindings.size() > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->objBindings.size() > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skip_call;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skip_call = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                            "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skip_call;
}
static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skip_call = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                                "Attempting to free memory associated with a Persistent Image, 0x%" PRIxLEAST64 ", "
                                "this should not be explicitly freed\n",
                                (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            // TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skip_call |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skip_call |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skip_call;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    default:
        return "unknown";
    }
}
// Remove object binding performs 2 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applied to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skip_call = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skip_call;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                     "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                     apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->objBindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO:: More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skip_call;
}
0x%p", cb); 830 } 831 } 832 } 833} 834 835static void printCBList(layer_data *my_data) { 836 GLOBAL_CB_NODE *pCBInfo = NULL; 837 838 // Early out if info is not requested 839 if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) { 840 return; 841 } 842 843 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, 844 MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)", 845 my_data->commandBufferMap.size()); 846 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, 847 MEMTRACK_NONE, "MEM", "=================="); 848 849 if (my_data->commandBufferMap.size() <= 0) 850 return; 851 852 for (auto &cb_node : my_data->commandBufferMap) { 853 pCBInfo = cb_node.second; 854 855 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, 856 __LINE__, MEMTRACK_NONE, "MEM", " CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer); 857 858 if (pCBInfo->memObjs.size() <= 0) 859 continue; 860 for (auto obj : pCBInfo->memObjs) { 861 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, 862 __LINE__, MEMTRACK_NONE, "MEM", " Mem obj 0x%" PRIx64, (uint64_t)obj); 863 } 864 } 865} 866 867// Return a string representation of CMD_TYPE enum 868static string cmdTypeToString(CMD_TYPE cmd) { 869 switch (cmd) { 870 case CMD_BINDPIPELINE: 871 return "CMD_BINDPIPELINE"; 872 case CMD_BINDPIPELINEDELTA: 873 return "CMD_BINDPIPELINEDELTA"; 874 case CMD_SETVIEWPORTSTATE: 875 return "CMD_SETVIEWPORTSTATE"; 876 case CMD_SETLINEWIDTHSTATE: 877 return "CMD_SETLINEWIDTHSTATE"; 878 case CMD_SETDEPTHBIASSTATE: 879 return "CMD_SETDEPTHBIASSTATE"; 880 case CMD_SETBLENDSTATE: 881 return "CMD_SETBLENDSTATE"; 882 case CMD_SETDEPTHBOUNDSSTATE: 883 return "CMD_SETDEPTHBOUNDSSTATE"; 884 case CMD_SETSTENCILREADMASKSTATE: 885 return "CMD_SETSTENCILREADMASKSTATE"; 886 case CMD_SETSTENCILWRITEMASKSTATE: 887 return "CMD_SETSTENCILWRITEMASKSTATE"; 888 case CMD_SETSTENCILREFERENCESTATE: 889 return "CMD_SETSTENCILREFERENCESTATE"; 890 case CMD_BINDDESCRIPTORSETS: 891 return "CMD_BINDDESCRIPTORSETS"; 892 case CMD_BINDINDEXBUFFER: 893 return "CMD_BINDINDEXBUFFER"; 894 case CMD_BINDVERTEXBUFFER: 895 return "CMD_BINDVERTEXBUFFER"; 896 case CMD_DRAW: 897 return "CMD_DRAW"; 898 case CMD_DRAWINDEXED: 899 return "CMD_DRAWINDEXED"; 900 case CMD_DRAWINDIRECT: 901 return "CMD_DRAWINDIRECT"; 902 case CMD_DRAWINDEXEDINDIRECT: 903 return "CMD_DRAWINDEXEDINDIRECT"; 904 case CMD_DISPATCH: 905 return "CMD_DISPATCH"; 906 case CMD_DISPATCHINDIRECT: 907 return "CMD_DISPATCHINDIRECT"; 908 case CMD_COPYBUFFER: 909 return "CMD_COPYBUFFER"; 910 case CMD_COPYIMAGE: 911 return "CMD_COPYIMAGE"; 912 case CMD_BLITIMAGE: 913 return "CMD_BLITIMAGE"; 914 case CMD_COPYBUFFERTOIMAGE: 915 return "CMD_COPYBUFFERTOIMAGE"; 916 case CMD_COPYIMAGETOBUFFER: 917 return "CMD_COPYIMAGETOBUFFER"; 918 case CMD_CLONEIMAGEDATA: 919 return "CMD_CLONEIMAGEDATA"; 920 case CMD_UPDATEBUFFER: 921 return "CMD_UPDATEBUFFER"; 922 case CMD_FILLBUFFER: 923 return "CMD_FILLBUFFER"; 924 case CMD_CLEARCOLORIMAGE: 925 return "CMD_CLEARCOLORIMAGE"; 926 case CMD_CLEARATTACHMENTS: 927 return "CMD_CLEARCOLORATTACHMENT"; 928 case CMD_CLEARDEPTHSTENCILIMAGE: 929 return "CMD_CLEARDEPTHSTENCILIMAGE"; 930 case CMD_RESOLVEIMAGE: 931 return "CMD_RESOLVEIMAGE"; 932 case 
// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->commandBufferBindings.size() + mem_info->objBindings.size());
        if (0 != mem_info->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->objBindings.size());
        if (mem_info->objBindings.size() > 0) {
            for (auto obj : mem_info->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->commandBufferBindings.size());
        if (mem_info->commandBufferBindings.size() > 0) {
            for (auto cb : mem_info->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}
// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}
// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
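// Illustrative sketch (hypothetical helper, not in the original file): once
// build_def_index() has run, resolving a variable's type is two O(1) lookups
// instead of a linear scan of the instruction stream.
static spirv_inst_iter get_variable_type_example(shader_module const *src, unsigned var_id) {
    auto var = src->get_def(var_id); // the OpVariable instruction, or end()
    if (var == src->end() || var.opcode() != spv::OpVariable)
        return src->end();
    return src->get_def(var.word(1)); // its result type, typically an OpTypePointer
}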
static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}
/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
           considering here, OR -- specialize on the fly now.
        */
        return 1;
    }

    return value.word(3);
}

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
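// Illustrative example of describe_type() output (a sketch derived from the rules
// above): an input variable declared as vec4 in GLSL arrives here as
// OpTypePointer Input -> OpTypeVector 4 -> OpTypeFloat 32, and is rendered as
// "ptr to input vec4 of float32".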
static bool is_narrow_numeric_type(spirv_inst_iter type)
{
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}

static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
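// Illustrative examples of the relaxed matching above ('a' = producer output,
// 'b' = consumer input), assuming 32-bit float components:
//   - vec4 (a) vs vec2 (b): matches when relaxed -- the producer may write more
//     components than the consumer reads (a_insn.word(3) >= b_insn.word(3)).
//   - vec4 (a) vs float32 (b): matches when relaxed -- the vector is peeled down
//     to its element type before comparing.
//   - float32 vs float64: never matches; widths must agree exactly.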
*/ 1477 unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch); 1478 for (unsigned int offset = 0; offset < num_locations; offset++) { 1479 interface_var v; 1480 v.id = id; 1481 v.type_id = type; 1482 v.offset = offset; 1483 v.is_patch = is_patch; 1484 v.is_block_member = false; 1485 out[std::make_pair(location + offset, component)] = v; 1486 } 1487 } else if (builtin == -1) { 1488 /* An interface block instance */ 1489 collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch); 1490 } 1491 } 1492 } 1493} 1494 1495static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src, 1496 std::unordered_set<uint32_t> const &accessible_ids, 1497 std::map<descriptor_slot_t, interface_var> &out) { 1498 1499 std::unordered_map<unsigned, unsigned> var_sets; 1500 std::unordered_map<unsigned, unsigned> var_bindings; 1501 1502 for (auto insn : *src) { 1503 /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both 1504 * DecorationDescriptorSet and DecorationBinding. 1505 */ 1506 if (insn.opcode() == spv::OpDecorate) { 1507 if (insn.word(2) == spv::DecorationDescriptorSet) { 1508 var_sets[insn.word(1)] = insn.word(3); 1509 } 1510 1511 if (insn.word(2) == spv::DecorationBinding) { 1512 var_bindings[insn.word(1)] = insn.word(3); 1513 } 1514 } 1515 } 1516 1517 for (auto id : accessible_ids) { 1518 auto insn = src->get_def(id); 1519 assert(insn != src->end()); 1520 1521 if (insn.opcode() == spv::OpVariable && 1522 (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) { 1523 unsigned set = value_or_default(var_sets, insn.word(2), 0); 1524 unsigned binding = value_or_default(var_bindings, insn.word(2), 0); 1525 1526 auto existing_it = out.find(std::make_pair(set, binding)); 1527 if (existing_it != out.end()) { 1528 /* conflict within spv image */ 1529 log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1530 __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", 1531 "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition", 1532 insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first, 1533 existing_it->first.second); 1534 } 1535 1536 interface_var v; 1537 v.id = insn.word(2); 1538 v.type_id = insn.word(1); 1539 v.offset = 0; 1540 v.is_patch = false; 1541 v.is_block_member = false; 1542 out[std::make_pair(set, binding)] = v; 1543 } 1544 } 1545} 1546 1547static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer, 1548 spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage, 1549 shader_module const *consumer, spirv_inst_iter consumer_entrypoint, 1550 shader_stage_attributes const *consumer_stage) { 1551 std::map<location_t, interface_var> outputs; 1552 std::map<location_t, interface_var> inputs; 1553 1554 bool pass = true; 1555 1556 collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output); 1557 collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input); 1558 1559 auto a_it = outputs.begin(); 1560 auto b_it = inputs.begin(); 1561 1562 /* maps sorted by key (location); walk them together to find mismatches */ 1563 while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != 
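/* The walk below is a classic sorted-merge: both std::maps iterate in ascending
 * (location, component) order, so comparing a_first/b_first tells us whether a
 * key exists only in outputs (warn: not consumed), only in inputs (error: not
 * produced), or in both (check types). A minimal sketch of the same pattern,
 * assuming two std::map<K,V> m1 and m2:
 *     auto i = m1.begin(); auto j = m2.begin();
 *     while (i != m1.end() || j != m2.end()) {
 *         if (j == m2.end() || (i != m1.end() && i->first < j->first)) ++i;  // key only in m1
 *         else if (i == m1.end() || j->first < i->first) ++j;                // key only in m2
 *         else { ++i; ++j; }                                                 // key in both
 *     }
 */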
inputs.end())) { 1564 bool a_at_end = outputs.size() == 0 || a_it == outputs.end(); 1565 bool b_at_end = inputs.size() == 0 || b_it == inputs.end(); 1566 auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first; 1567 auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first; 1568 1569 if (b_at_end || ((!a_at_end) && (a_first < b_first))) { 1570 if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1571 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1572 "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first, 1573 a_first.second, consumer_stage->name)) { 1574 pass = false; 1575 } 1576 a_it++; 1577 } else if (a_at_end || a_first > b_first) { 1578 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1579 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", 1580 "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second, 1581 producer_stage->name)) { 1582 pass = false; 1583 } 1584 b_it++; 1585 } else { 1586 // subtleties of arrayed interfaces: 1587 // - if is_patch, then the member is not arrayed, even though the interface may be. 1588 // - if is_block_member, then the extra array level of an arrayed interface is not 1589 // expressed in the member type -- it's expressed in the block type. 1590 if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, 1591 producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member, 1592 consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, 1593 true)) { 1594 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1595 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'", 1596 a_first.first, a_first.second, 1597 describe_type(producer, a_it->second.type_id).c_str(), 1598 describe_type(consumer, b_it->second.type_id).c_str())) { 1599 pass = false; 1600 } 1601 } 1602 if (a_it->second.is_patch != b_it->second.is_patch) { 1603 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0, 1604 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1605 "Decoration mismatch on location %u.%u: is per-%s in %s stage but " 1606 "per-%s in %s stage", a_first.first, a_first.second, 1607 a_it->second.is_patch ? "patch" : "vertex", producer_stage->name, 1608 b_it->second.is_patch ? 
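/* (Illustrative trigger for this check: a TCS declaring
   `layout(location = 0) patch out vec4 v;` paired with a TES declaring
   `layout(location = 0) in vec4 v[];` -- the same location is per-patch on
   one side and per-vertex on the other.) */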
"patch" : "vertex", consumer_stage->name)) { 1609 pass = false; 1610 } 1611 } 1612 a_it++; 1613 b_it++; 1614 } 1615 } 1616 1617 return pass; 1618} 1619 1620enum FORMAT_TYPE { 1621 FORMAT_TYPE_UNDEFINED, 1622 FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */ 1623 FORMAT_TYPE_SINT, 1624 FORMAT_TYPE_UINT, 1625}; 1626 1627static unsigned get_format_type(VkFormat fmt) { 1628 switch (fmt) { 1629 case VK_FORMAT_UNDEFINED: 1630 return FORMAT_TYPE_UNDEFINED; 1631 case VK_FORMAT_R8_SINT: 1632 case VK_FORMAT_R8G8_SINT: 1633 case VK_FORMAT_R8G8B8_SINT: 1634 case VK_FORMAT_R8G8B8A8_SINT: 1635 case VK_FORMAT_R16_SINT: 1636 case VK_FORMAT_R16G16_SINT: 1637 case VK_FORMAT_R16G16B16_SINT: 1638 case VK_FORMAT_R16G16B16A16_SINT: 1639 case VK_FORMAT_R32_SINT: 1640 case VK_FORMAT_R32G32_SINT: 1641 case VK_FORMAT_R32G32B32_SINT: 1642 case VK_FORMAT_R32G32B32A32_SINT: 1643 case VK_FORMAT_R64_SINT: 1644 case VK_FORMAT_R64G64_SINT: 1645 case VK_FORMAT_R64G64B64_SINT: 1646 case VK_FORMAT_R64G64B64A64_SINT: 1647 case VK_FORMAT_B8G8R8_SINT: 1648 case VK_FORMAT_B8G8R8A8_SINT: 1649 case VK_FORMAT_A8B8G8R8_SINT_PACK32: 1650 case VK_FORMAT_A2B10G10R10_SINT_PACK32: 1651 case VK_FORMAT_A2R10G10B10_SINT_PACK32: 1652 return FORMAT_TYPE_SINT; 1653 case VK_FORMAT_R8_UINT: 1654 case VK_FORMAT_R8G8_UINT: 1655 case VK_FORMAT_R8G8B8_UINT: 1656 case VK_FORMAT_R8G8B8A8_UINT: 1657 case VK_FORMAT_R16_UINT: 1658 case VK_FORMAT_R16G16_UINT: 1659 case VK_FORMAT_R16G16B16_UINT: 1660 case VK_FORMAT_R16G16B16A16_UINT: 1661 case VK_FORMAT_R32_UINT: 1662 case VK_FORMAT_R32G32_UINT: 1663 case VK_FORMAT_R32G32B32_UINT: 1664 case VK_FORMAT_R32G32B32A32_UINT: 1665 case VK_FORMAT_R64_UINT: 1666 case VK_FORMAT_R64G64_UINT: 1667 case VK_FORMAT_R64G64B64_UINT: 1668 case VK_FORMAT_R64G64B64A64_UINT: 1669 case VK_FORMAT_B8G8R8_UINT: 1670 case VK_FORMAT_B8G8R8A8_UINT: 1671 case VK_FORMAT_A8B8G8R8_UINT_PACK32: 1672 case VK_FORMAT_A2B10G10R10_UINT_PACK32: 1673 case VK_FORMAT_A2R10G10B10_UINT_PACK32: 1674 return FORMAT_TYPE_UINT; 1675 default: 1676 return FORMAT_TYPE_FLOAT; 1677 } 1678} 1679 1680/* characterizes a SPIR-V type appearing in an interface to a FF stage, 1681 * for comparison to a VkFormat's characterization above. */ 1682static unsigned get_fundamental_type(shader_module const *src, unsigned type) { 1683 auto insn = src->get_def(type); 1684 assert(insn != src->end()); 1685 1686 switch (insn.opcode()) { 1687 case spv::OpTypeInt: 1688 return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT; 1689 case spv::OpTypeFloat: 1690 return FORMAT_TYPE_FLOAT; 1691 case spv::OpTypeVector: 1692 return get_fundamental_type(src, insn.word(2)); 1693 case spv::OpTypeMatrix: 1694 return get_fundamental_type(src, insn.word(2)); 1695 case spv::OpTypeArray: 1696 return get_fundamental_type(src, insn.word(2)); 1697 case spv::OpTypePointer: 1698 return get_fundamental_type(src, insn.word(3)); 1699 default: 1700 return FORMAT_TYPE_UNDEFINED; 1701 } 1702} 1703 1704static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) { 1705 uint32_t bit_pos = u_ffs(stage); 1706 return bit_pos - 1; 1707} 1708 1709static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) { 1710 /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer. 1711 * each binding should be specified only once. 
1712 */ 1713 std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings; 1714 bool pass = true; 1715 1716 for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) { 1717 auto desc = &vi->pVertexBindingDescriptions[i]; 1718 auto &binding = bindings[desc->binding]; 1719 if (binding) { 1720 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1721 __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC", 1722 "Duplicate vertex input binding descriptions for binding %d", desc->binding)) { 1723 pass = false; 1724 } 1725 } else { 1726 binding = desc; 1727 } 1728 } 1729 1730 return pass; 1731} 1732 1733static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi, 1734 shader_module const *vs, spirv_inst_iter entrypoint) { 1735 std::map<location_t, interface_var> inputs; 1736 bool pass = true; 1737 1738 collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false); 1739 1740 /* Build index by location */ 1741 std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs; 1742 if (vi) { 1743 for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) { 1744 auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format); 1745 for (auto j = 0u; j < num_locations; j++) { 1746 attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i]; 1747 } 1748 } 1749 } 1750 1751 auto it_a = attribs.begin(); 1752 auto it_b = inputs.begin(); 1753 bool used = false; 1754 1755 while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) { 1756 bool a_at_end = attribs.size() == 0 || it_a == attribs.end(); 1757 bool b_at_end = inputs.size() == 0 || it_b == inputs.end(); 1758 auto a_first = a_at_end ? 0 : it_a->first; 1759 auto b_first = b_at_end ? 0 : it_b->first.first; 1760 if (!a_at_end && (b_at_end || a_first < b_first)) { 1761 if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1762 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1763 "Vertex attribute at location %d not consumed by VS", a_first)) { 1764 pass = false; 1765 } 1766 used = false; 1767 it_a++; 1768 } else if (!b_at_end && (a_at_end || b_first < a_first)) { 1769 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0, 1770 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided", 1771 b_first)) { 1772 pass = false; 1773 } 1774 it_b++; 1775 } else { 1776 unsigned attrib_type = get_format_type(it_a->second->format); 1777 unsigned input_type = get_fundamental_type(vs, it_b->second.type_id); 1778 1779 /* type checking */ 1780 if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) { 1781 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1782 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1783 "Attribute type of `%s` at location %d does not match VS input type of `%s`", 1784 string_VkFormat(it_a->second->format), a_first, 1785 describe_type(vs, it_b->second.type_id).c_str())) { 1786 pass = false; 1787 } 1788 } 1789 1790 /* OK! 
*/ 1791 used = true; 1792 it_b++; 1793 } 1794 } 1795 1796 return pass; 1797} 1798 1799static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs, 1800 spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci, 1801 uint32_t subpass_index) { 1802 std::map<location_t, interface_var> outputs; 1803 std::map<uint32_t, VkFormat> color_attachments; 1804 auto subpass = rpci->pSubpasses[subpass_index]; 1805 for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) { 1806 uint32_t attachment = subpass.pColorAttachments[i].attachment; 1807 if (attachment == VK_ATTACHMENT_UNUSED) 1808 continue; 1809 if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) { 1810 color_attachments[i] = rpci->pAttachments[attachment].format; 1811 } 1812 } 1813 1814 bool pass = true; 1815 1816 /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */ 1817 1818 collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false); 1819 1820 auto it_a = outputs.begin(); 1821 auto it_b = color_attachments.begin(); 1822 1823 /* Walk attachment list and outputs together */ 1824 1825 while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) { 1826 bool a_at_end = outputs.size() == 0 || it_a == outputs.end(); 1827 bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end(); 1828 1829 if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) { 1830 if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1831 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1832 "FS writes to output location %d with no matching attachment", it_a->first.first)) { 1833 pass = false; 1834 } 1835 it_a++; 1836 } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) { 1837 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1838 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) { 1839 pass = false; 1840 } 1841 it_b++; 1842 } else { 1843 unsigned output_type = get_fundamental_type(fs, it_a->second.type_id); 1844 unsigned att_type = get_format_type(it_b->second); 1845 1846 /* type checking */ 1847 if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) { 1848 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1849 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1850 "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first, 1851 string_VkFormat(it_b->second), 1852 describe_type(fs, it_a->second.type_id).c_str())) { 1853 pass = false; 1854 } 1855 } 1856 1857 /* OK! */ 1858 it_a++; 1859 it_b++; 1860 } 1861 } 1862 1863 return pass; 1864} 1865 1866/* For some analyses, we need to know about all ids referenced by the static call tree of a particular 1867 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint, 1868 * for example. 1869 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses. 1870 * - NOT the shader input/output interfaces. 1871 * 1872 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth 1873 * converting parts of this to be generated from the machine-readable spec instead. 
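 *
 * In outline, the traversal below is a plain worklist walk over result ids:
 *   worklist = { the entrypoint's function id }
 *   while worklist is non-empty: pop an id; skip it if already emitted;
 *   otherwise record it, and if its definition is an OpFunction, scan the
 *   function body and push the pointer/image/sampler operands and call
 *   targets of every interesting instruction found there.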
1874 */ 1875static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) { 1876 std::unordered_set<uint32_t> worklist; 1877 worklist.insert(entrypoint.word(2)); 1878 1879 while (!worklist.empty()) { 1880 auto id_iter = worklist.begin(); 1881 auto id = *id_iter; 1882 worklist.erase(id_iter); 1883 1884 auto insn = src->get_def(id); 1885 if (insn == src->end()) { 1886 /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble 1887 * across all kinds of things here that we may not care about. */ 1888 continue; 1889 } 1890 1891 /* try to add to the output set */ 1892 if (!ids.insert(id).second) { 1893 continue; /* if we already saw this id, we don't want to walk it again. */ 1894 } 1895 1896 switch (insn.opcode()) { 1897 case spv::OpFunction: 1898 /* scan whole body of the function, enlisting anything interesting */ 1899 while (++insn, insn.opcode() != spv::OpFunctionEnd) { 1900 switch (insn.opcode()) { 1901 case spv::OpLoad: 1902 case spv::OpAtomicLoad: 1903 case spv::OpAtomicExchange: 1904 case spv::OpAtomicCompareExchange: 1905 case spv::OpAtomicCompareExchangeWeak: 1906 case spv::OpAtomicIIncrement: 1907 case spv::OpAtomicIDecrement: 1908 case spv::OpAtomicIAdd: 1909 case spv::OpAtomicISub: 1910 case spv::OpAtomicSMin: 1911 case spv::OpAtomicUMin: 1912 case spv::OpAtomicSMax: 1913 case spv::OpAtomicUMax: 1914 case spv::OpAtomicAnd: 1915 case spv::OpAtomicOr: 1916 case spv::OpAtomicXor: 1917 worklist.insert(insn.word(3)); /* ptr */ 1918 break; 1919 case spv::OpStore: 1920 case spv::OpAtomicStore: 1921 worklist.insert(insn.word(1)); /* ptr */ 1922 break; 1923 case spv::OpAccessChain: 1924 case spv::OpInBoundsAccessChain: 1925 worklist.insert(insn.word(3)); /* base ptr */ 1926 break; 1927 case spv::OpSampledImage: 1928 case spv::OpImageSampleImplicitLod: 1929 case spv::OpImageSampleExplicitLod: 1930 case spv::OpImageSampleDrefImplicitLod: 1931 case spv::OpImageSampleDrefExplicitLod: 1932 case spv::OpImageSampleProjImplicitLod: 1933 case spv::OpImageSampleProjExplicitLod: 1934 case spv::OpImageSampleProjDrefImplicitLod: 1935 case spv::OpImageSampleProjDrefExplicitLod: 1936 case spv::OpImageFetch: 1937 case spv::OpImageGather: 1938 case spv::OpImageDrefGather: 1939 case spv::OpImageRead: 1940 case spv::OpImage: 1941 case spv::OpImageQueryFormat: 1942 case spv::OpImageQueryOrder: 1943 case spv::OpImageQuerySizeLod: 1944 case spv::OpImageQuerySize: 1945 case spv::OpImageQueryLod: 1946 case spv::OpImageQueryLevels: 1947 case spv::OpImageQuerySamples: 1948 case spv::OpImageSparseSampleImplicitLod: 1949 case spv::OpImageSparseSampleExplicitLod: 1950 case spv::OpImageSparseSampleDrefImplicitLod: 1951 case spv::OpImageSparseSampleDrefExplicitLod: 1952 case spv::OpImageSparseSampleProjImplicitLod: 1953 case spv::OpImageSparseSampleProjExplicitLod: 1954 case spv::OpImageSparseSampleProjDrefImplicitLod: 1955 case spv::OpImageSparseSampleProjDrefExplicitLod: 1956 case spv::OpImageSparseFetch: 1957 case spv::OpImageSparseGather: 1958 case spv::OpImageSparseDrefGather: 1959 case spv::OpImageTexelPointer: 1960 worklist.insert(insn.word(3)); /* image or sampled image */ 1961 break; 1962 case spv::OpImageWrite: 1963 worklist.insert(insn.word(1)); /* image -- different operand order to above */ 1964 break; 1965 case spv::OpFunctionCall: 1966 for (uint32_t i = 3; i < insn.len(); i++) { 1967 worklist.insert(insn.word(i)); /* fn itself, and all args */ 1968 } 1969 break; 1970 1971 case spv::OpExtInst: 1972 for (uint32_t i = 5; i < 
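/* (OpExtInst word layout: word 1 = result type, word 2 = result id, word 3 =
   the imported extended-instruction-set id, word 4 = the instruction number
   within that set; the actual operands start at word 5, hence the loop bound
   below.) */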
insn.len(); i++) { 1973 worklist.insert(insn.word(i)); /* operands to ext inst */ 1974 } 1975 break; 1976 } 1977 } 1978 break; 1979 } 1980 } 1981} 1982 1983static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data, 1984 std::vector<VkPushConstantRange> const *pushConstantRanges, 1985 shader_module const *src, spirv_inst_iter type, 1986 VkShaderStageFlagBits stage) { 1987 bool pass = true; 1988 1989 /* strip off ptrs etc */ 1990 type = get_struct_type(src, type, false); 1991 assert(type != src->end()); 1992 1993 /* validate directly off the offsets. this isn't quite correct for arrays 1994 * and matrices, but is a good first step. TODO: arrays, matrices, weird 1995 * sizes */ 1996 for (auto insn : *src) { 1997 if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) { 1998 1999 if (insn.word(3) == spv::DecorationOffset) { 2000 unsigned offset = insn.word(4); 2001 auto size = 4; /* bytes; TODO: calculate this based on the type */ 2002 2003 bool found_range = false; 2004 for (auto const &range : *pushConstantRanges) { 2005 if (range.offset <= offset && range.offset + range.size >= offset + size) { 2006 found_range = true; 2007 2008 if ((range.stageFlags & stage) == 0) { 2009 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2010 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC", 2011 "Push constant range covering variable starting at " 2012 "offset %u not accessible from stage %s", 2013 offset, string_VkShaderStageFlagBits(stage))) { 2014 pass = false; 2015 } 2016 } 2017 2018 break; 2019 } 2020 } 2021 2022 if (!found_range) { 2023 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2024 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC", 2025 "Push constant range covering variable starting at " 2026 "offset %u not declared in layout", 2027 offset)) { 2028 pass = false; 2029 } 2030 } 2031 } 2032 } 2033 } 2034 2035 return pass; 2036} 2037 2038static bool validate_push_constant_usage(debug_report_data *report_data, 2039 std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src, 2040 std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) { 2041 bool pass = true; 2042 2043 for (auto id : accessible_ids) { 2044 auto def_insn = src->get_def(id); 2045 if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) { 2046 pass &= validate_push_constant_block_against_pipeline(report_data, pushConstantRanges, src, 2047 src->get_def(def_insn.word(1)), stage); 2048 } 2049 } 2050 2051 return pass; 2052} 2053 2054// For given pipelineLayout verify that the set_layout_node at slot.first 2055// has the requested binding at slot.second and return ptr to that binding 2056static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) { 2057 2058 if (!pipelineLayout) 2059 return nullptr; 2060 2061 if (slot.first >= pipelineLayout->descriptorSetLayouts.size()) 2062 return nullptr; 2063 2064 return pipelineLayout->setLayouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second); 2065} 2066 2067// Block of code at start here for managing/tracking Pipeline state that this layer cares about 2068 2069static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0}; 2070 2071// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound 2072// Then need 
to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates 2073// to that same cmd buffer by separate thread are not changing state from underneath us 2074// Track the last cmd buffer touched by this thread 2075 2076static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) { 2077 for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) { 2078 if (pCB->drawCount[i]) 2079 return true; 2080 } 2081 return false; 2082} 2083 2084// Check object status for selected flag state 2085static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags, 2086 DRAW_STATE_ERROR error_code, const char *fail_msg) { 2087 if (!(pNode->status & status_mask)) { 2088 return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 2089 reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS", 2090 "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg); 2091 } 2092 return false; 2093} 2094 2095// Retrieve pipeline node ptr for given pipeline object 2096static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) { 2097 auto it = my_data->pipelineMap.find(pipeline); 2098 if (it == my_data->pipelineMap.end()) { 2099 return nullptr; 2100 } 2101 return it->second; 2102} 2103 2104static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) { 2105 auto it = my_data->renderPassMap.find(renderpass); 2106 if (it == my_data->renderPassMap.end()) { 2107 return nullptr; 2108 } 2109 return it->second; 2110} 2111 2112static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) { 2113 auto it = my_data->frameBufferMap.find(framebuffer); 2114 if (it == my_data->frameBufferMap.end()) { 2115 return nullptr; 2116 } 2117 return it->second.get(); 2118} 2119 2120cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) { 2121 auto it = my_data->descriptorSetLayoutMap.find(dsLayout); 2122 if (it == my_data->descriptorSetLayoutMap.end()) { 2123 return nullptr; 2124 } 2125 return it->second; 2126} 2127 2128static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) { 2129 auto it = my_data->pipelineLayoutMap.find(pipeLayout); 2130 if (it == my_data->pipelineLayoutMap.end()) { 2131 return nullptr; 2132 } 2133 return &it->second; 2134} 2135 2136// Return true if for a given PSO, the given state enum is dynamic, else return false 2137static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) { 2138 if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) { 2139 for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { 2140 if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) 2141 return true; 2142 } 2143 } 2144 return false; 2145} 2146 2147// Validate state stored as flags at time of draw call 2148static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) { 2149 bool result; 2150 result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND, 2151 "Dynamic viewport state not set for this command buffer"); 2152 result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND, 2153 "Dynamic scissor 
                              state not set for this command buffer");
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
    }
    if (indexedDraw) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
    }
    return result;
}

// Verify attachment reference compatibility according to spec.
// If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED & the other array must match this.
// If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
// to make sure that format and sample counts match.
// If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
            return true;
    } else { // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For the given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            // Note: pass the *input* attachment counts here; the color counts above do not apply to pInputAttachments.
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
                                            const VkPipelineLayout layout, const uint32_t layoutIndex, string &errorMsg) {
    auto pipeline_layout = getPipelineLayout(my_data, layout);
    if (!pipeline_layout) {
        stringstream errorStr;
        errorStr << "invalid VkPipelineLayout (" << layout << ")";
        errorMsg = errorStr.str();
        return false;
    }
    if (layoutIndex >= pipeline_layout->descriptorSetLayouts.size()) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout->descriptorSetLayouts.size()
                 << " setLayouts corresponding to sets 0-" << pipeline_layout->descriptorSetLayouts.size() - 1
                 << ", but you're attempting to bind set to index " << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = pipeline_layout->setLayouts[layoutIndex];
    return pSet->IsCompatible(layout_node, &errorMsg);
}

// Validate that data for each specialization entry is fully contained within the buffer.
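// Example of the failure mode caught below, with illustrative values only:
//     VkSpecializationMapEntry entry = {7 /*constantID*/, 12 /*offset*/, 8 /*size*/};
//     VkSpecializationInfo info = {1, &entry, 16 /*dataSize*/, pData};
// Here offset 12 + size 8 = 20 exceeds dataSize 16, so entry 0 is flagged.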
2317static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) { 2318 bool pass = true; 2319 2320 VkSpecializationInfo const *spec = info->pSpecializationInfo; 2321 2322 if (spec) { 2323 for (auto i = 0u; i < spec->mapEntryCount; i++) { 2324 if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) { 2325 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 2326 /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC", 2327 "Specialization entry %u (for constant id %u) references memory outside provided " 2328 "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER 2329 " bytes provided)", 2330 i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset, 2331 spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) { 2332 2333 pass = false; 2334 } 2335 } 2336 } 2337 } 2338 2339 return pass; 2340} 2341 2342static bool descriptor_type_match(shader_module const *module, uint32_t type_id, 2343 VkDescriptorType descriptor_type, unsigned &descriptor_count) { 2344 auto type = module->get_def(type_id); 2345 2346 descriptor_count = 1; 2347 2348 /* Strip off any array or ptrs. Where we remove array levels, adjust the 2349 * descriptor count for each dimension. */ 2350 while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) { 2351 if (type.opcode() == spv::OpTypeArray) { 2352 descriptor_count *= get_constant_value(module, type.word(3)); 2353 type = module->get_def(type.word(2)); 2354 } 2355 else { 2356 type = module->get_def(type.word(3)); 2357 } 2358 } 2359 2360 switch (type.opcode()) { 2361 case spv::OpTypeStruct: { 2362 for (auto insn : *module) { 2363 if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) { 2364 if (insn.word(2) == spv::DecorationBlock) { 2365 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || 2366 descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; 2367 } else if (insn.word(2) == spv::DecorationBufferBlock) { 2368 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || 2369 descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; 2370 } 2371 } 2372 } 2373 2374 /* Invalid */ 2375 return false; 2376 } 2377 2378 case spv::OpTypeSampler: 2379 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER; 2380 2381 case spv::OpTypeSampledImage: 2382 if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { 2383 /* Slight relaxation for some GLSL historical madness: samplerBuffer 2384 * doesn't really have a sampler, and a texel buffer descriptor 2385 * doesn't really provide one. Allow this slight mismatch. 2386 */ 2387 auto image_type = module->get_def(type.word(2)); 2388 auto dim = image_type.word(3); 2389 auto sampled = image_type.word(7); 2390 return dim == spv::DimBuffer && sampled == 1; 2391 } 2392 return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; 2393 2394 case spv::OpTypeImage: { 2395 /* Many descriptor types backing image types-- depends on dimension 2396 * and whether the image will be used with a sampler. SPIRV for 2397 * Vulkan requires that sampled be 1 or 2 -- leaving the decision to 2398 * runtime is unacceptable. 
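 *
 * The mapping enforced below, in table form:
 *   Dim           sampled   required VkDescriptorType
 *   SubpassData   (any)     INPUT_ATTACHMENT
 *   Buffer        1         UNIFORM_TEXEL_BUFFER
 *   Buffer        2         STORAGE_TEXEL_BUFFER
 *   other         1         SAMPLED_IMAGE
 *   other         2         STORAGE_IMAGE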
2399 */ 2400 auto dim = type.word(3); 2401 auto sampled = type.word(7); 2402 2403 if (dim == spv::DimSubpassData) { 2404 return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; 2405 } else if (dim == spv::DimBuffer) { 2406 if (sampled == 1) { 2407 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; 2408 } else { 2409 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; 2410 } 2411 } else if (sampled == 1) { 2412 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; 2413 } else { 2414 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; 2415 } 2416 } 2417 2418 /* We shouldn't really see any other junk types -- but if we do, they're 2419 * a mismatch. 2420 */ 2421 default: 2422 return false; /* Mismatch */ 2423 } 2424} 2425 2426static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) { 2427 if (!feature) { 2428 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2429 __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC", 2430 "Shader requires VkPhysicalDeviceFeatures::%s but is not " 2431 "enabled on the device", 2432 feature_name)) { 2433 return false; 2434 } 2435 } 2436 2437 return true; 2438} 2439 2440static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src, 2441 VkPhysicalDeviceFeatures const *enabledFeatures) { 2442 bool pass = true; 2443 2444 2445 for (auto insn : *src) { 2446 if (insn.opcode() == spv::OpCapability) { 2447 switch (insn.word(1)) { 2448 case spv::CapabilityMatrix: 2449 case spv::CapabilityShader: 2450 case spv::CapabilityInputAttachment: 2451 case spv::CapabilitySampled1D: 2452 case spv::CapabilityImage1D: 2453 case spv::CapabilitySampledBuffer: 2454 case spv::CapabilityImageBuffer: 2455 case spv::CapabilityImageQuery: 2456 case spv::CapabilityDerivativeControl: 2457 // Always supported by a Vulkan 1.0 implementation -- no feature bits. 
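// (Anything not handled in this switch falls through to the default case
// below and is reported as SHADER_CHECKER_BAD_CAPABILITY -- e.g. a module
// declaring OpCapability Kernel, which is OpenCL-only, lands there.)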
2458 break; 2459 2460 case spv::CapabilityGeometry: 2461 pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader"); 2462 break; 2463 2464 case spv::CapabilityTessellation: 2465 pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader"); 2466 break; 2467 2468 case spv::CapabilityFloat64: 2469 pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64"); 2470 break; 2471 2472 case spv::CapabilityInt64: 2473 pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64"); 2474 break; 2475 2476 case spv::CapabilityTessellationPointSize: 2477 case spv::CapabilityGeometryPointSize: 2478 pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize, 2479 "shaderTessellationAndGeometryPointSize"); 2480 break; 2481 2482 case spv::CapabilityImageGatherExtended: 2483 pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended"); 2484 break; 2485 2486 case spv::CapabilityStorageImageMultisample: 2487 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample"); 2488 break; 2489 2490 case spv::CapabilityUniformBufferArrayDynamicIndexing: 2491 pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing, 2492 "shaderUniformBufferArrayDynamicIndexing"); 2493 break; 2494 2495 case spv::CapabilitySampledImageArrayDynamicIndexing: 2496 pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing, 2497 "shaderSampledImageArrayDynamicIndexing"); 2498 break; 2499 2500 case spv::CapabilityStorageBufferArrayDynamicIndexing: 2501 pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing, 2502 "shaderStorageBufferArrayDynamicIndexing"); 2503 break; 2504 2505 case spv::CapabilityStorageImageArrayDynamicIndexing: 2506 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing, 2507 "shaderStorageImageArrayDynamicIndexing"); 2508 break; 2509 2510 case spv::CapabilityClipDistance: 2511 pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance"); 2512 break; 2513 2514 case spv::CapabilityCullDistance: 2515 pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance"); 2516 break; 2517 2518 case spv::CapabilityImageCubeArray: 2519 pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray"); 2520 break; 2521 2522 case spv::CapabilitySampleRateShading: 2523 pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading"); 2524 break; 2525 2526 case spv::CapabilitySparseResidency: 2527 pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency"); 2528 break; 2529 2530 case spv::CapabilityMinLod: 2531 pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod"); 2532 break; 2533 2534 case spv::CapabilitySampledCubeArray: 2535 pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray"); 2536 break; 2537 2538 case spv::CapabilityImageMSArray: 2539 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample"); 2540 break; 2541 2542 case spv::CapabilityStorageImageExtendedFormats: 2543 pass &= require_feature(report_data, 
enabledFeatures->shaderStorageImageExtendedFormats, 2544 "shaderStorageImageExtendedFormats"); 2545 break; 2546 2547 case spv::CapabilityInterpolationFunction: 2548 pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading"); 2549 break; 2550 2551 case spv::CapabilityStorageImageReadWithoutFormat: 2552 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat, 2553 "shaderStorageImageReadWithoutFormat"); 2554 break; 2555 2556 case spv::CapabilityStorageImageWriteWithoutFormat: 2557 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat, 2558 "shaderStorageImageWriteWithoutFormat"); 2559 break; 2560 2561 case spv::CapabilityMultiViewport: 2562 pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport"); 2563 break; 2564 2565 default: 2566 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2567 __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC", 2568 "Shader declares capability %u, not supported in Vulkan.", 2569 insn.word(1))) 2570 pass = false; 2571 break; 2572 } 2573 } 2574 } 2575 2576 return pass; 2577} 2578 2579static bool validate_pipeline_shader_stage(debug_report_data *report_data, 2580 VkPipelineShaderStageCreateInfo const *pStage, 2581 PIPELINE_NODE *pipeline, 2582 shader_module **out_module, 2583 spirv_inst_iter *out_entrypoint, 2584 VkPhysicalDeviceFeatures const *enabledFeatures, 2585 std::unordered_map<VkShaderModule, 2586 std::unique_ptr<shader_module>> const &shaderModuleMap) { 2587 bool pass = true; 2588 auto module_it = shaderModuleMap.find(pStage->module); 2589 auto module = *out_module = module_it->second.get(); 2590 pass &= validate_specialization_offsets(report_data, pStage); 2591 2592 /* find the entrypoint */ 2593 auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage); 2594 if (entrypoint == module->end()) { 2595 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2596 __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC", 2597 "No entrypoint found named `%s` for stage %s", pStage->pName, 2598 string_VkShaderStageFlagBits(pStage->stage))) { 2599 pass = false; 2600 } 2601 } 2602 2603 /* validate shader capabilities against enabled device features */ 2604 pass &= validate_shader_capabilities(report_data, module, enabledFeatures); 2605 2606 /* mark accessible ids */ 2607 std::unordered_set<uint32_t> accessible_ids; 2608 mark_accessible_ids(module, entrypoint, accessible_ids); 2609 2610 /* validate descriptor set layout against what the entrypoint actually uses */ 2611 std::map<descriptor_slot_t, interface_var> descriptor_uses; 2612 collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses); 2613 2614 auto pipelineLayout = pipeline->pipelineLayout; 2615 2616 /* validate push constant usage */ 2617 pass &= validate_push_constant_usage(report_data, &pipelineLayout->pushConstantRanges, 2618 module, accessible_ids, pStage->stage); 2619 2620 /* validate descriptor use */ 2621 for (auto use : descriptor_uses) { 2622 // While validating shaders capture which slots are used by the pipeline 2623 pipeline->active_slots[use.first.first].insert(use.first.second); 2624 2625 /* verify given pipelineLayout has requested setLayout with requested binding */ 2626 const auto & binding = get_descriptor_binding(pipelineLayout, use.first); 2627 unsigned required_descriptor_count; 2628 2629 if (!binding) { 2630 if (log_msg(report_data, 
                VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used "
                        "as type `%s`) but descriptor not "
                        "accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
                                          /*out*/ required_descriptor_count)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
                        "%u.%u (used as type `%s`) but "
                        "descriptor of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    return pass;
}


// Validate the shaders used by the given pipeline, and capture the descriptor slots
// actually used by the pipeline into pPipeline->active_slots
static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    bool pass = true;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        auto pStage = &pCreateInfo->pStages[i];
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
                                               &shaders[stage_id], &entrypoints[stage_id],
                                               enabledFeatures, shaderModuleMap);
    }

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(report_data, vi);
    }

    if (shaders[vertex_stage]) {
        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }

    int producer =
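/* The producer/consumer walk below steps through the stage array in pipeline
   order (vertex -> tess control -> tess eval -> geometry -> fragment),
   skipping absent stages: e.g. for a VS+GS+FS pipeline it validates the
   VS->GS interface and then the GS->FS interface. */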
get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT); 2706 int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); 2707 2708 while (!shaders[producer] && producer != fragment_stage) { 2709 producer++; 2710 consumer++; 2711 } 2712 2713 for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) { 2714 assert(shaders[producer]); 2715 if (shaders[consumer]) { 2716 pass &= validate_interface_between_stages(report_data, 2717 shaders[producer], entrypoints[producer], &shader_stage_attribs[producer], 2718 shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]); 2719 2720 producer = consumer; 2721 } 2722 } 2723 2724 if (shaders[fragment_stage]) { 2725 pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage], 2726 pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass); 2727 } 2728 2729 return pass; 2730} 2731 2732static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures, 2733 std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) { 2734 auto pCreateInfo = pPipeline->computePipelineCI.ptr(); 2735 2736 shader_module *module; 2737 spirv_inst_iter entrypoint; 2738 2739 return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline, 2740 &module, &entrypoint, enabledFeatures, shaderModuleMap); 2741} 2742// Return Set node ptr for specified set or else NULL 2743cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) { 2744 auto set_it = my_data->setMap.find(set); 2745 if (set_it == my_data->setMap.end()) { 2746 return NULL; 2747 } 2748 return set_it->second; 2749} 2750// For the given command buffer, verify and update the state for activeSetBindingsPairs 2751// This includes: 2752// 1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound. 2753// To be valid, the dynamic offset combined with the offset and range from its 2754// descriptor update must not overflow the size of its buffer being updated 2755// 2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images 2756// 3. 
Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers 2757static bool validate_and_update_drawtime_descriptor_state( 2758 layer_data *dev_data, GLOBAL_CB_NODE *pCB, 2759 const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, 2760 std::vector<uint32_t> const *>> &activeSetBindingsPairs) { 2761 bool result = false; 2762 for (auto set_bindings_pair : activeSetBindingsPairs) { 2763 cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair); 2764 std::string err_str; 2765 if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair), 2766 &err_str)) { 2767 // Report error here 2768 auto set = set_node->GetSet(); 2769 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2770 reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS", 2771 "DS 0x%" PRIxLEAST64 " encountered the following validation error at draw time: %s", 2772 reinterpret_cast<const uint64_t &>(set), err_str.c_str()); 2773 } 2774 set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages); 2775 } 2776 return result; 2777} 2778 2779// For given pipeline, return number of MSAA samples, or one if MSAA disabled 2780static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) { 2781 if (pipe->graphicsPipelineCI.pMultisampleState != NULL && 2782 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) { 2783 return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples; 2784 } 2785 return VK_SAMPLE_COUNT_1_BIT; 2786} 2787 2788// Validate draw-time state related to the PSO 2789static bool validatePipelineDrawtimeState(layer_data const *my_data, 2790 LAST_BOUND_STATE const &state, 2791 const GLOBAL_CB_NODE *pCB, 2792 PIPELINE_NODE const *pPipeline) { 2793 bool skip_call = false; 2794 2795 // Verify Vtx binding 2796 if (pPipeline->vertexBindingDescriptions.size() > 0) { 2797 for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) { 2798 if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) { 2799 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2800 __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS", 2801 "The Pipeline State Object (0x%" PRIxLEAST64 2802 ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER 2803 " should be set via vkCmdBindVertexBuffers.", 2804 (uint64_t)state.pipeline, i); 2805 } 2806 } 2807 } else { 2808 if (!pCB->currentDrawData.buffers.empty()) { 2809 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 2810 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS", 2811 "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64 2812 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").", 2813 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline); 2814 } 2815 } 2816 // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count. 2817 // Skip check if rasterization is disabled or there is no viewport. 
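// Illustrative mismatch for the viewport check below: a PSO created with
// viewportCount == 2 and VK_DYNAMIC_STATE_VIEWPORT, while the app recorded
// only vkCmdSetViewport(cb, 0 /*firstViewport*/, 1 /*viewportCount*/, &vp);
// one viewport tracked vs. a PSO count of two triggers
// DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH.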
2818 if ((!pPipeline->graphicsPipelineCI.pRasterizationState || 2819 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) && 2820 pPipeline->graphicsPipelineCI.pViewportState) { 2821 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT); 2822 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR); 2823 if (dynViewport) { 2824 if (pCB->viewports.size() != pPipeline->graphicsPipelineCI.pViewportState->viewportCount) { 2825 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2826 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 2827 "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER 2828 ", but PSO viewportCount is %u. These counts must match.", 2829 pCB->viewports.size(), pPipeline->graphicsPipelineCI.pViewportState->viewportCount); 2830 } 2831 } 2832 if (dynScissor) { 2833 if (pCB->scissors.size() != pPipeline->graphicsPipelineCI.pViewportState->scissorCount) { 2834 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2835 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 2836 "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER 2837 ", but PSO scissorCount is %u. These counts must match.", 2838 pCB->scissors.size(), pPipeline->graphicsPipelineCI.pViewportState->scissorCount); 2839 } 2840 } 2841 } 2842 2843 // Verify that any MSAA request in PSO matches sample# in bound FB 2844 // Skip the check if rasterization is disabled. 2845 if (!pPipeline->graphicsPipelineCI.pRasterizationState || 2846 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { 2847 VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline); 2848 if (pCB->activeRenderPass) { 2849 const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo; 2850 const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass]; 2851 uint32_t i; 2852 2853 const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState; 2854 if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) && 2855 (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) { 2856 skip_call |= 2857 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 2858 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 2859 "Subpass %u mismatch: pipeline color blend state defines %u attachments but " 2860 "the subpass uses %u color attachments in Pipeline (0x%" PRIxLEAST64 ")! 
These " 2861 "must be the same at draw-time.", 2862 pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount, 2863 reinterpret_cast<const uint64_t &>(pPipeline->pipeline)); 2864 } 2865 2866 unsigned subpass_num_samples = 0; 2867 2868 for (i = 0; i < subpass_desc->colorAttachmentCount; i++) { 2869 auto attachment = subpass_desc->pColorAttachments[i].attachment; 2870 if (attachment != VK_ATTACHMENT_UNUSED) 2871 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples; 2872 } 2873 2874 if (subpass_desc->pDepthStencilAttachment && 2875 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 2876 auto attachment = subpass_desc->pDepthStencilAttachment->attachment; 2877 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples; 2878 } 2879 2880 if (subpass_num_samples && pso_num_samples != subpass_num_samples) { 2881 skip_call |= 2882 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 2883 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS", 2884 "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64 2885 ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!", 2886 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples, 2887 reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples); 2888 } 2889 } else { 2890 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 2891 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS", 2892 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!", 2893 reinterpret_cast<const uint64_t &>(pPipeline->pipeline)); 2894 } 2895 } 2896 // Verify that PSO creation renderPass is compatible with active renderPass 2897 if (pCB->activeRenderPass) { 2898 std::string err_string; 2899 if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) && 2900 !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->pCreateInfo, pPipeline->render_pass_ci.ptr(), 2901 err_string)) { 2902 // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with 2903 skip_call |= 2904 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 2905 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS", 2906 "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline " 2907 "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s", 2908 reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<uint64_t &>(pPipeline), 2909 reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str()); 2910 } 2911 } 2912 // TODO : Add more checks here 2913 2914 return skip_call; 2915} 2916 2917// Validate overall state at the time of a draw call 2918static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw, 2919 const VkPipelineBindPoint bindPoint) { 2920 bool result = false; 2921 auto const &state = pCB->lastBound[bindPoint]; 2922 PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline); 2923 if (nullptr == pPipe) { 2924 result |= log_msg( 2925 
my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, 2926 DRAWSTATE_INVALID_PIPELINE, "DS", 2927 "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline()."); 2928 // Early return: every check below dereferences pPipe, which is null here 2929 return result; 2930 2931 } 2932 // First check flag states 2933 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) 2934 result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw); 2935 2936 // Now complete other state checks 2937 if (state.pipelineLayout) { 2938 string errorString; 2939 auto pipelineLayout = (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) ? pPipe->graphicsPipelineCI.layout : pPipe->computePipelineCI.layout; 2940 2941 // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets 2942 vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, std::vector<uint32_t> const *>> activeSetBindingsPairs; 2943 for (auto & setBindingPair : pPipe->active_slots) { 2944 uint32_t setIndex = setBindingPair.first; 2945 // If valid set is not bound throw an error 2946 if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) { 2947 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 2948 DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS", 2949 "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline, 2950 setIndex); 2951 } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], 2952 pipelineLayout, setIndex, errorString)) { 2953 // Set is bound but not compatible w/ overlapping pipelineLayout from PSO 2954 VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet(); 2955 result |= 2956 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2957 (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS", 2958 "VkDescriptorSet (0x%" PRIxLEAST64 2959 ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s", 2960 (uint64_t)setHandle, setIndex, (uint64_t)pipelineLayout, errorString.c_str()); 2961 } else { // Valid set is bound and layout compatible, validate that it's updated 2962 // Pull the set node 2963 cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex]; 2964 // Save vector of all active sets to verify dynamicOffsets below 2965 activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, 2966 &state.dynamicOffsets[setIndex])); 2967 // Make sure set has been updated if it has no immutable samplers 2968 // If it has immutable samplers, we'll flag error later as needed depending on binding 2969 if (!pSet->IsUpdated()) { 2970 for (auto binding : setBindingPair.second) { 2971 if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) { 2972 result |= log_msg( 2973 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2974 (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS", 2975 "DS 0x%" PRIxLEAST64 " bound but it was never updated. 
It is now being used to draw so " 2976 "this will result in undefined behavior.", 2977 (uint64_t)pSet->GetSet()); 2978 } 2979 } 2980 } 2981 } 2982 } 2983 // For given active slots, verify any dynamic descriptors and record updated images & buffers 2984 result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs); 2985 } 2986 2987 // Check general pipeline state that needs to be validated at drawtime 2988 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) 2989 result |= validatePipelineDrawtimeState(my_data, state, pCB, pPipe); 2990 2991 return result; 2992} 2993 2994// Validate HW line width capabilities prior to setting requested line width. 2995static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) { 2996 bool skip_call = false; 2997 2998 // First check to see if the physical device supports wide lines. 2999 if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) { 3000 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__, 3001 dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature " 3002 "not supported/enabled so lineWidth must be 1.0f!", 3003 lineWidth); 3004 } else { 3005 // Otherwise, make sure the width falls in the valid range. 3006 if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) || 3007 (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) { 3008 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, 3009 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width " 3010 "to between [%f, %f]!", 3011 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0], 3012 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]); 3013 } 3014 } 3015 3016 return skip_call; 3017} 3018 3019// Verify that create state for a pipeline is valid 3020static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines, 3021 int pipelineIndex) { 3022 bool skip_call = false; 3023 3024 PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex]; 3025 3026 // If create derivative bit is set, check that we've specified a base 3027 // pipeline correctly, and that the base pipeline was created to allow 3028 // derivatives. 
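// Example (hypothetical app-side usage, not part of this layer): a derivative request
// that passes the checks below looks like
//     VkGraphicsPipelineCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
//     ci.flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     ci.basePipelineHandle = VK_NULL_HANDLE; // unused when an index is given
//     ci.basePipelineIndex = 0;               // earlier element of the same pCreateInfos array,
//                                             // created with VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT
// Specifying both the handle and the index, or neither, fails the exactly-one check.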
3029 if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { 3030 PIPELINE_NODE *pBasePipeline = nullptr; 3031 if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^ 3032 (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) { 3033 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3034 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3035 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified"); 3036 } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) { 3037 if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) { 3038 skip_call |= 3039 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3040 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3041 "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline."); 3042 } else { 3043 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex]; 3044 } 3045 } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { 3046 pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle); 3047 } 3048 3049 if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { 3050 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3051 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3052 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives."); 3053 } 3054 } 3055 3056 if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) { 3057 if (!my_data->phys_dev_properties.features.independentBlend) { 3058 if (pPipeline->attachments.size() > 1) { 3059 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0]; 3060 for (size_t i = 1; i < pPipeline->attachments.size(); i++) { 3061 if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) || 3062 (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) || 3063 (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) || 3064 (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) || 3065 (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) || 3066 (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) || 3067 (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) || 3068 (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) { 3069 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 3070 __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS", 3071 "Invalid Pipeline CreateInfo: If independent blend feature not " 3072 "enabled, all elements of pAttachments must be identical"); 3073 } 3074 } 3075 } 3076 } 3077 if (!my_data->phys_dev_properties.features.logicOp && 3078 (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) { 3079 skip_call |= 3080 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3081 DRAWSTATE_DISABLED_LOGIC_OP, "DS", 3082 "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE"); 3083 } 3084 if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) && 3085 ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < 
VK_LOGIC_OP_CLEAR) || 3086 (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) { 3087 skip_call |= 3088 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3089 DRAWSTATE_INVALID_LOGIC_OP, "DS", 3090 "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value"); 3091 } 3092 } 3093 3094 // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state 3095 // produces nonsense errors that confuse users. Other layers should already 3096 // emit errors for renderpass being invalid. 3097 auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass); 3098 if (renderPass && 3099 pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) { 3100 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3101 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u " 3102 "is out of range for this renderpass (0..%u)", 3103 pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1); 3104 } 3105 3106 if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features, 3107 my_data->shaderModuleMap)) { 3108 skip_call = true; 3109 } 3110 // Each shader's stage must be unique 3111 if (pPipeline->duplicate_shaders) { 3112 for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) { 3113 if (pPipeline->duplicate_shaders & stage) { 3114 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 3115 __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3116 "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s", 3117 string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage))); 3118 } 3119 } 3120 } 3121 // VS is required 3122 if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) { 3123 skip_call |= 3124 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3125 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required"); 3126 } 3127 // Either both or neither TC/TE shaders should be defined 3128 if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) != 3129 ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) { 3130 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3131 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3132 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair"); 3133 } 3134 // Compute shaders should be specified independent of Gfx shaders 3135 if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) && 3136 (pPipeline->active_shaders & 3137 (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | 3138 VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) { 3139 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3140 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3141 "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline"); 3142 } 3143 // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is 
only valid for tessellation pipelines. 3144 // Mismatching primitive topology and tessellation fails graphics pipeline creation. 3145 if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) && 3146 (!pPipeline->graphicsPipelineCI.pInputAssemblyState || 3147 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { 3148 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3149 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3150 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA " 3151 "topology for tessellation pipelines"); 3152 } 3153 if (pPipeline->graphicsPipelineCI.pInputAssemblyState && 3154 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { 3155 if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) { 3156 skip_call |= 3157 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3158 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3159 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3160 "topology is only valid for tessellation pipelines"); 3161 } 3162 if (!pPipeline->graphicsPipelineCI.pTessellationState) { 3163 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3164 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3165 "Invalid Pipeline CreateInfo State: " 3166 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3167 "topology used. pTessellationState must not be NULL in this case."); 3168 } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints || 3169 (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) { 3170 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3171 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3172 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3173 "topology used with patchControlPoints value %u." 3174 " patchControlPoints should be >0 and <=32.", 3175 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints); 3176 } 3177 } 3178 // If a rasterization state is provided, make sure that the line width conforms to the HW. 3179 if (pPipeline->graphicsPipelineCI.pRasterizationState) { 3180 if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) { 3181 skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline), 3182 pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth); 3183 } 3184 } 3185 // Viewport state must be included if rasterization is enabled. 3186 // If the viewport state is included, the viewport and scissor counts should always match. 3187 // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler 3188 if (!pPipeline->graphicsPipelineCI.pRasterizationState || 3189 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { 3190 if (!pPipeline->graphicsPipelineCI.pViewportState) { 3191 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3192 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. 
Even if viewport " 3193 "and scissors are dynamic PSO must include " 3194 "viewportCount and scissorCount in pViewportState."); 3195 } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount != 3196 pPipeline->graphicsPipelineCI.pViewportState->viewportCount) { 3197 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3198 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3199 "Gfx Pipeline viewport count (%u) must match scissor count (%u).", 3200 pPipeline->graphicsPipelineCI.pViewportState->viewportCount, 3201 pPipeline->graphicsPipelineCI.pViewportState->scissorCount); 3202 } else { 3203 // If viewport or scissor are not dynamic, then verify that data is appropriate for count 3204 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT); 3205 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR); 3206 if (!dynViewport) { 3207 if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount && 3208 !pPipeline->graphicsPipelineCI.pViewportState->pViewports) { 3209 skip_call |= 3210 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3211 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3212 "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you " 3213 "must either include pViewports data, or include viewport in pDynamicState and set it with " 3214 "vkCmdSetViewport().", 3215 pPipeline->graphicsPipelineCI.pViewportState->viewportCount); 3216 } 3217 } 3218 if (!dynScissor) { 3219 if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount && 3220 !pPipeline->graphicsPipelineCI.pViewportState->pScissors) { 3221 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 3222 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3223 "Gfx Pipeline scissorCount is %u, but pScissors is NULL. 
For non-zero scissorCount, you " 3224 "must either include pScissors data, or include scissor in pDynamicState and set it with " 3225 "vkCmdSetScissor().", 3226 pPipeline->graphicsPipelineCI.pViewportState->scissorCount); 3227 } 3228 } 3229 } 3230 } 3231 return skip_call; 3232} 3233 3234// Free the Pipeline nodes 3235static void deletePipelines(layer_data *my_data) { 3236 if (my_data->pipelineMap.size() <= 0) 3237 return; 3238 for (auto &pipe_map_pair : my_data->pipelineMap) { 3239 delete pipe_map_pair.second; 3240 } 3241 my_data->pipelineMap.clear(); 3242} 3243 3244// Block of code at start here specifically for managing/tracking DSs 3245 3246// Return Pool node ptr for specified pool or else NULL 3247DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) { 3248 auto pool_it = dev_data->descriptorPoolMap.find(pool); 3249 if (pool_it == dev_data->descriptorPoolMap.end()) { 3250 return NULL; 3251 } 3252 return pool_it->second; 3253} 3254 3255// Return false if update struct is of a valid type; otherwise flag an error and return the skip code from the callback 3256static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) { 3257 switch (pUpdateStruct->sType) { 3258 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3259 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3260 return false; 3261 default: 3262 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3263 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", 3264 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", 3265 string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType); 3266 } 3267} 3268 3269// Return the descriptor count for the given update struct 3270static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) { 3271 switch (pUpdateStruct->sType) { 3272 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3273 return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount; 3274 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3275 // TODO : Need to understand this case better and make sure code is correct 3276 return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount; 3277 default: 3278 return 0; 3279 } 3280} 3281 3282// For given layout and update, return the first overall index of the layout that is updated (e.g. binding_start_index 5 with arrayIndex 2 yields start index 7) 3283static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index, 3284 const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) { 3285 return binding_start_index + arrayIndex; 3286} 3287// For given layout and update, return the last overall index of the layout that is updated (e.g. start index 7 with descriptorCount 3 yields end index 9) 3288static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index, 3289 const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) { 3290 uint32_t count = getUpdateCount(my_data, device, pUpdateStruct); 3291 return binding_start_index + arrayIndex + count - 1; 3292} 3293// Verify that the descriptor type in the update struct matches what's expected by the layout 3294static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type, 3295 const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) { 3296 // First get actual type of update 3297 bool skip_call = false; 3298 VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM; 3299 switch (pUpdateStruct->sType) { 3300 case 
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3301 actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType; 3302 break; 3303 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3304 /* no need to validate */ 3305 return false; 3306 break; 3307 default: 3308 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3309 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", 3310 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", 3311 string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType); 3312 } 3313 if (!skip_call) { 3314 if (layout_type != actualType) { 3315 skip_call |= log_msg( 3316 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3317 DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS", 3318 "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!", 3319 string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type)); 3320 } 3321 } 3322 return skip_call; 3323} 3324//TODO: Consolidate functions 3325bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) { 3326 layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map); 3327 if (!(imgpair.subresource.aspectMask & aspectMask)) { 3328 return false; 3329 } 3330 VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask; 3331 imgpair.subresource.aspectMask = aspectMask; 3332 auto imgsubIt = pCB->imageLayoutMap.find(imgpair); 3333 if (imgsubIt == pCB->imageLayoutMap.end()) { 3334 return false; 3335 } 3336 if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) { 3337 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3338 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3339 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s", 3340 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout)); 3341 } 3342 if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) { 3343 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3344 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3345 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s", 3346 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout)); 3347 } 3348 node = imgsubIt->second; 3349 return true; 3350} 3351 3352bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) { 3353 if (!(imgpair.subresource.aspectMask & aspectMask)) { 3354 return false; 3355 } 3356 VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask; 3357 imgpair.subresource.aspectMask = aspectMask; 3358 auto imgsubIt = my_data->imageLayoutMap.find(imgpair); 3359 if (imgsubIt == my_data->imageLayoutMap.end()) { 3360 return false; 3361 } 3362 if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) { 3363 log_msg(my_data->report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3364 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3365 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s", 3366 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout)); 3367 } 3368 layout = imgsubIt->second.layout; 3369 return true; 3370} 3371 3372// find layout(s) on the cmd buf level 3373bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) { 3374 ImageSubresourcePair imgpair = {image, true, range}; 3375 node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM); 3376 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT); 3377 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT); 3378 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT); 3379 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT); 3380 if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) { 3381 imgpair = {image, false, VkImageSubresource()}; 3382 auto imgsubIt = pCB->imageLayoutMap.find(imgpair); 3383 if (imgsubIt == pCB->imageLayoutMap.end()) 3384 return false; 3385 node = imgsubIt->second; 3386 } 3387 return true; 3388} 3389 3390// find layout(s) on the global level 3391bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) { 3392 layout = VK_IMAGE_LAYOUT_MAX_ENUM; 3393 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT); 3394 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT); 3395 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT); 3396 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT); 3397 if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) { 3398 imgpair = {imgpair.image, false, VkImageSubresource()}; 3399 auto imgsubIt = my_data->imageLayoutMap.find(imgpair); 3400 if (imgsubIt == my_data->imageLayoutMap.end()) 3401 return false; 3402 layout = imgsubIt->second.layout; 3403 } 3404 return true; 3405} 3406 3407bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) { 3408 ImageSubresourcePair imgpair = {image, true, range}; 3409 return FindLayout(my_data, imgpair, layout); 3410} 3411 3412bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) { 3413 auto sub_data = my_data->imageSubresourceMap.find(image); 3414 if (sub_data == my_data->imageSubresourceMap.end()) 3415 return false; 3416 auto img_node = getImageNode(my_data, image); 3417 if (!img_node) 3418 return false; 3419 bool ignoreGlobal = false; 3420 // TODO: Make this robust for >1 aspect mask. Now it will just say ignore 3421 // potential errors in this case. 
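// Example of the heuristic below (hypothetical numbers): a single-aspect image with
// createInfo.arrayLayers == 2 and createInfo.mipLevels == 3 has 6 addressable
// subresources. Once the tracked (image, subresource) pairs number at least
// 2 * 3 + 1 == 7 (all per-subresource entries plus the whole-image entry), the
// whole-image "global" entry is considered stale and is skipped in favor of the
// per-subresource layouts.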
3422 if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) { 3423 ignoreGlobal = true; 3424 } 3425 for (auto imgsubpair : sub_data->second) { 3426 if (ignoreGlobal && !imgsubpair.hasSubresource) 3427 continue; 3428 auto img_data = my_data->imageLayoutMap.find(imgsubpair); 3429 if (img_data != my_data->imageLayoutMap.end()) { 3430 layouts.push_back(img_data->second.layout); 3431 } 3432 } 3433 return true; 3434} 3435 3436// Set the layout on the global level 3437void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) { 3438 VkImage &image = imgpair.image; 3439 // TODO (mlentine): Maybe set format if new? Not used atm. 3440 my_data->imageLayoutMap[imgpair].layout = layout; 3441 // TODO (mlentine): Maybe make vector a set? 3442 auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair); 3443 if (subresource == my_data->imageSubresourceMap[image].end()) { 3444 my_data->imageSubresourceMap[image].push_back(imgpair); 3445 } 3446} 3447 3448// Set the layout on the cmdbuf level 3449void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) { 3450 pCB->imageLayoutMap[imgpair] = node; 3451 // TODO (mlentine): Maybe make vector a set? 3452 auto subresource = 3453 std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair); 3454 if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) { 3455 pCB->imageSubresourceMap[imgpair.image].push_back(imgpair); 3456 } 3457} 3458 3459void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) { 3460 // TODO (mlentine): Maybe make vector a set? 3461 if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) != 3462 pCB->imageSubresourceMap[imgpair.image].end()) { 3463 pCB->imageLayoutMap[imgpair].layout = layout; 3464 } else { 3465 // TODO (mlentine): Could be expensive and might need to be removed. 
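// When this (image, subresource) pair has not been recorded in this command buffer
// yet, fall back to any previously known layout for it; if none is found, the incoming
// layout also serves as the initial (first-use) layout for this command buffer.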
3466 assert(imgpair.hasSubresource); 3467 IMAGE_CMD_BUF_LAYOUT_NODE node; 3468 if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) { 3469 node.initialLayout = layout; 3470 } 3471 SetLayout(pCB, imgpair, {node.initialLayout, layout}); 3472 } 3473} 3474 3475template <class OBJECT, class LAYOUT> 3476void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) { 3477 if (imgpair.subresource.aspectMask & aspectMask) { 3478 imgpair.subresource.aspectMask = aspectMask; 3479 SetLayout(pObject, imgpair, layout); 3480 } 3481} 3482 3483template <class OBJECT, class LAYOUT> 3484void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) { 3485 ImageSubresourcePair imgpair = {image, true, range}; 3486 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT); 3487 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT); 3488 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT); 3489 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT); 3490} 3491 3492template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) { 3493 ImageSubresourcePair imgpair = {image, false, VkImageSubresource()}; 3494 SetLayout(pObject, imgpair, layout); 3495} 3496 3497void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) { 3498 auto iv_data = getImageViewData(dev_data, imageView); 3499 assert(iv_data); 3500 const VkImage &image = iv_data->image; 3501 const VkImageSubresourceRange &subRange = iv_data->subresourceRange; 3502 // TODO: Do not iterate over every possibility - consolidate where possible 3503 for (uint32_t j = 0; j < subRange.levelCount; j++) { 3504 uint32_t level = subRange.baseMipLevel + j; 3505 for (uint32_t k = 0; k < subRange.layerCount; k++) { 3506 uint32_t layer = subRange.baseArrayLayer + k; 3507 VkImageSubresource sub = {subRange.aspectMask, level, layer}; 3508 SetLayout(pCB, image, sub, layout); 3509 } 3510 } 3511} 3512 3513// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer 3514// func_str is the name of the calling function 3515// Return false if no errors occur 3516// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain) 3517static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) { 3518 bool skip_call = false; 3519 auto set_node = my_data->setMap.find(set); 3520 if (set_node == my_data->setMap.end()) { 3521 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3522 (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS", 3523 "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(), 3524 (uint64_t)(set)); 3525 } else { 3526 if (set_node->second->in_use.load()) { 3527 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 3528 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE, 3529 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.", 3530 func_str.c_str(), (uint64_t)(set)); 3531 } 3532 } 3533 return skip_call; 3534} 3535 3536// Remove set from setMap and delete the set 3537static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) { 3538 
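// NOTE: callers (deletePools, clearDescriptorPool) are responsible for removing the
// freed set from its pool's 'sets' list; only the device-level setMap is updated here.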
dev_data->setMap.erase(descriptor_set->GetSet()); 3539 delete descriptor_set; 3540} 3541// Free all DS Pools including their Sets & related sub-structs 3542// NOTE : Calls to this function should be wrapped in mutex 3543static void deletePools(layer_data *my_data) { 3544 if (my_data->descriptorPoolMap.size() <= 0) 3545 return; 3546 for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) { 3547 // Remove this pool's sets from setMap and delete them 3548 for (auto ds : (*ii).second->sets) { 3549 freeDescriptorSet(my_data, ds); 3550 } 3551 (*ii).second->sets.clear(); delete (*ii).second; // free the pool node itself, not just its sets 3552 } 3553 my_data->descriptorPoolMap.clear(); 3554} 3555 3556static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool, 3557 VkDescriptorPoolResetFlags flags) { 3558 DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool); 3559 if (!pPool) { 3560 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 3561 (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS", 3562 "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool); 3563 } else { 3564 // TODO: validate flags 3565 // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet 3566 for (auto ds : pPool->sets) { 3567 freeDescriptorSet(my_data, ds); 3568 } 3569 pPool->sets.clear(); 3570 // Reset available count for each type and available sets for this pool 3571 for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) { 3572 pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i]; 3573 } 3574 pPool->availableSets = pPool->maxSets; 3575 } 3576} 3577 3578// For given CB object, fetch associated CB Node from map 3579static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) { 3580 auto it = my_data->commandBufferMap.find(cb); 3581 if (it == my_data->commandBufferMap.end()) { 3582 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 3583 reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 3584 "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb)); 3585 return NULL; 3586 } 3587 return it->second; 3588} 3589// Free all CB Nodes 3590// NOTE : Calls to this function should be wrapped in mutex 3591static void deleteCommandBuffers(layer_data *my_data) { 3592 if (my_data->commandBufferMap.empty()) { 3593 return; 3594 } 3595 for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) { 3596 delete (*ii).second; 3597 } 3598 my_data->commandBufferMap.clear(); 3599} 3600 3601static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) { 3602 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 3603 (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS", 3604 "You must call vkBeginCommandBuffer() before this call to %s", caller_name); 3605} 3606 3607bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) { 3608 if (!pCB->activeRenderPass) 3609 return false; 3610 bool skip_call = false; 3611 if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && 3612 (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != 
CMD_ENDRENDERPASS)) { 3613 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3614 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 3615 "Commands cannot be called in a subpass using secondary command buffers."); 3616 } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) { 3617 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3618 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 3619 "vkCmdExecuteCommands() cannot be called in a subpass using inline commands."); 3620 } 3621 return skip_call; 3622} 3623 3624static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 3625 if (!(flags & VK_QUEUE_GRAPHICS_BIT)) 3626 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3627 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 3628 "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name); 3629 return false; 3630} 3631 3632static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 3633 if (!(flags & VK_QUEUE_COMPUTE_BIT)) 3634 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3635 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 3636 "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name); 3637 return false; 3638} 3639 3640static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 3641 if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT))) 3642 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3643 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 3644 "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name); 3645 return false; 3646} 3647 3648// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not 3649// in the recording state or if there's an issue with the Cmd ordering 3650static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) { 3651 bool skip_call = false; 3652 auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool); 3653 if (pPool) { 3654 VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags; 3655 switch (cmd) { 3656 case CMD_BINDPIPELINE: 3657 case CMD_BINDPIPELINEDELTA: 3658 case CMD_BINDDESCRIPTORSETS: 3659 case CMD_FILLBUFFER: 3660 case CMD_CLEARCOLORIMAGE: 3661 case CMD_SETEVENT: 3662 case CMD_RESETEVENT: 3663 case CMD_WAITEVENTS: 3664 case CMD_BEGINQUERY: 3665 case CMD_ENDQUERY: 3666 case CMD_RESETQUERYPOOL: 3667 case CMD_COPYQUERYPOOLRESULTS: 3668 case CMD_WRITETIMESTAMP: 3669 skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str()); 3670 break; 3671 case CMD_SETVIEWPORTSTATE: 3672 case CMD_SETSCISSORSTATE: 3673 case CMD_SETLINEWIDTHSTATE: 3674 case CMD_SETDEPTHBIASSTATE: 3675 case CMD_SETBLENDSTATE: 3676 case CMD_SETDEPTHBOUNDSSTATE: 3677 case CMD_SETSTENCILREADMASKSTATE: 3678 case CMD_SETSTENCILWRITEMASKSTATE: 3679 case CMD_SETSTENCILREFERENCESTATE: 3680 case CMD_BINDINDEXBUFFER: 3681 case CMD_BINDVERTEXBUFFER: 3682 case CMD_DRAW: 3683 case CMD_DRAWINDEXED: 3684 case CMD_DRAWINDIRECT: 3685 case CMD_DRAWINDEXEDINDIRECT: 3686 case CMD_BLITIMAGE: 
3687 case CMD_CLEARATTACHMENTS: 3688 case CMD_CLEARDEPTHSTENCILIMAGE: 3689 case CMD_RESOLVEIMAGE: 3690 case CMD_BEGINRENDERPASS: 3691 case CMD_NEXTSUBPASS: 3692 case CMD_ENDRENDERPASS: 3693 skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str()); 3694 break; 3695 case CMD_DISPATCH: 3696 case CMD_DISPATCHINDIRECT: 3697 skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str()); 3698 break; 3699 case CMD_COPYBUFFER: 3700 case CMD_COPYIMAGE: 3701 case CMD_COPYBUFFERTOIMAGE: 3702 case CMD_COPYIMAGETOBUFFER: 3703 case CMD_CLONEIMAGEDATA: 3704 case CMD_UPDATEBUFFER: 3705 case CMD_PIPELINEBARRIER: 3706 case CMD_EXECUTECOMMANDS: 3707 case CMD_END: 3708 break; 3709 default: 3710 break; 3711 } 3712 } 3713 if (pCB->state != CB_RECORDING) { 3714 skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name); 3715 } else { 3716 skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd); 3717 CMD_NODE cmdNode = {}; 3718 // init cmd node and append to end of cmd LL 3719 cmdNode.cmdNumber = ++pCB->numCmds; 3720 cmdNode.type = cmd; 3721 pCB->cmds.push_back(cmdNode); 3722 } 3723 return skip_call; 3724} 3725// Reset the command buffer state 3726// Maintain the createInfo and set state to CB_NEW, but clear all other state 3727static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) { 3728 GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb]; 3729 if (pCB) { 3730 pCB->in_use.store(0); 3731 pCB->cmds.clear(); 3732 // Reset CB state (note that createInfo is not cleared) 3733 pCB->commandBuffer = cb; 3734 memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo)); 3735 memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo)); 3736 pCB->numCmds = 0; 3737 memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t)); 3738 pCB->state = CB_NEW; 3739 pCB->submitCount = 0; 3740 pCB->status = 0; 3741 pCB->viewports.clear(); 3742 pCB->scissors.clear(); 3743 3744 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 3745 // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets 3746 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 3747 set->RemoveBoundCommandBuffer(pCB); 3748 } 3749 pCB->lastBound[i].reset(); 3750 } 3751 3752 memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo)); 3753 pCB->activeRenderPass = nullptr; 3754 pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE; 3755 pCB->activeSubpass = 0; 3756 pCB->broken_bindings.clear(); 3757 pCB->waitedEvents.clear(); 3758 pCB->events.clear(); 3759 pCB->writeEventsBeforeWait.clear(); 3760 pCB->waitedEventsBeforeQueryReset.clear(); 3761 pCB->queryToStateMap.clear(); 3762 pCB->activeQueries.clear(); 3763 pCB->startedQueries.clear(); 3764 pCB->imageSubresourceMap.clear(); 3765 pCB->imageLayoutMap.clear(); 3766 pCB->eventToStageMap.clear(); 3767 pCB->drawData.clear(); 3768 pCB->currentDrawData.buffers.clear(); 3769 pCB->primaryCommandBuffer = VK_NULL_HANDLE; 3770 // Make sure any secondaryCommandBuffers are removed from globalInFlight 3771 for (auto secondary_cb : pCB->secondaryCommandBuffers) { 3772 dev_data->globalInFlightCmdBuffers.erase(secondary_cb); 3773 } 3774 pCB->secondaryCommandBuffers.clear(); 3775 pCB->updateImages.clear(); 3776 pCB->updateBuffers.clear(); 3777 clear_cmd_buf_and_mem_references(dev_data, pCB); 3778 pCB->eventUpdates.clear(); 3779 pCB->queryUpdates.clear(); 3780 3781 // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list 3782 for (auto framebuffer : pCB->framebuffers) { 3783 auto 
fb_node = getFramebuffer(dev_data, framebuffer); 3784 if (fb_node) 3785 fb_node->cb_bindings.erase(pCB); 3786 } 3787 pCB->framebuffers.clear(); 3788 pCB->activeFramebuffer = VK_NULL_HANDLE; 3789 } 3790} 3791 3792// Set PSO-related status bits for CB, including dynamic state set via PSO 3793static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) { 3794 // Account for any dynamic state not set via this PSO 3795 if (!pPipe->graphicsPipelineCI.pDynamicState || 3796 !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static 3797 pCB->status = CBSTATUS_ALL; 3798 } else { 3799 // First consider all state on 3800 // Then unset any state that's noted as dynamic in PSO 3801 // Finally OR that into CB statemask 3802 CBStatusFlags psoDynStateMask = CBSTATUS_ALL; 3803 for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { 3804 switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) { 3805 case VK_DYNAMIC_STATE_VIEWPORT: 3806 psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET; 3807 break; 3808 case VK_DYNAMIC_STATE_SCISSOR: 3809 psoDynStateMask &= ~CBSTATUS_SCISSOR_SET; 3810 break; 3811 case VK_DYNAMIC_STATE_LINE_WIDTH: 3812 psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET; 3813 break; 3814 case VK_DYNAMIC_STATE_DEPTH_BIAS: 3815 psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET; 3816 break; 3817 case VK_DYNAMIC_STATE_BLEND_CONSTANTS: 3818 psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET; 3819 break; 3820 case VK_DYNAMIC_STATE_DEPTH_BOUNDS: 3821 psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET; 3822 break; 3823 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK: 3824 psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET; 3825 break; 3826 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK: 3827 psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET; 3828 break; 3829 case VK_DYNAMIC_STATE_STENCIL_REFERENCE: 3830 psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET; 3831 break; 3832 default: 3833 // TODO : Flag error here 3834 break; 3835 } 3836 } 3837 pCB->status |= psoDynStateMask; 3838 } 3839} 3840 3841// Print the last bound Gfx Pipeline 3842static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) { 3843 bool skip_call = false; 3844 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb); 3845 if (pCB) { 3846 PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline); 3847 if (!pPipeTrav) { 3848 // nothing to print 3849 } else { 3850 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 3851 __LINE__, DRAWSTATE_NONE, "DS", "%s", 3852 vk_print_vkgraphicspipelinecreateinfo( 3853 reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}") 3854 .c_str()); 3855 } 3856 } 3857 return skip_call; 3858} 3859 3860static void printCB(layer_data *my_data, const VkCommandBuffer cb) { 3861 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb); 3862 if (pCB && pCB->cmds.size() > 0) { 3863 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3864 DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb); 3865 vector<CMD_NODE> cmds = pCB->cmds; 3866 for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) { 3867 // TODO : Need to pass cb as srcObj here 3868 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 3869 __LINE__, DRAWSTATE_NONE, "DS", " CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, 
cmdTypeToString((*ii).type).c_str()); 3870 } 3871 } else { 3872 // Nothing to print 3873 } 3874} 3875 3876static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) { 3877 bool skip_call = false; 3878 if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) { 3879 return skip_call; 3880 } 3881 skip_call |= printPipeline(my_data, cb); 3882 return skip_call; 3883} 3884 3885// Flags validation error if the associated call is made inside a render pass. The apiName 3886// routine should ONLY be called outside a render pass. 3887static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) { 3888 bool inside = false; 3889 if (pCB->activeRenderPass) { 3890 inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 3891 (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS", 3892 "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName, 3893 (uint64_t)pCB->activeRenderPass->renderPass); 3894 } 3895 return inside; 3896} 3897 3898// Flags validation error if the associated call is made outside a render pass. The apiName 3899// routine should ONLY be called inside a render pass. 3900static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) { 3901 bool outside = false; 3902 if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) || 3903 ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) && 3904 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) { 3905 outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 3906 (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS", 3907 "%s: This call must be issued inside an active render pass.", apiName); 3908 } 3909 return outside; 3910} 3911 3912static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) { 3913 3914 layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation"); 3915 3916} 3917 3918VKAPI_ATTR VkResult VKAPI_CALL 3919CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) { 3920 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); 3921 3922 assert(chain_info->u.pLayerInfo); 3923 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; 3924 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance"); 3925 if (fpCreateInstance == NULL) 3926 return VK_ERROR_INITIALIZATION_FAILED; 3927 3928 // Advance the link info for the next element on the chain 3929 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; 3930 3931 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance); 3932 if (result != VK_SUCCESS) 3933 return result; 3934 3935 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map); 3936 instance_data->instance = *pInstance; 3937 instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable; 3938 layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr); 3939 3940 instance_data->report_data = 3941 
debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, 3942 pCreateInfo->ppEnabledExtensionNames); 3943 init_core_validation(instance_data, pAllocator); 3944 3945 instance_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE()); 3946 ValidateLayerOrdering(*pCreateInfo); 3947 3948 return result; 3949} 3950 3951/* hook DestroyInstance to remove tableInstanceMap entry */ 3952VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { 3953 // TODOSC : Shouldn't need any customization here 3954 dispatch_key key = get_dispatch_key(instance); 3955 // TBD: Need any locking this early, in case this function is called at the 3956 // same time by more than one thread? 3957 layer_data *my_data = get_my_data_ptr(key, layer_data_map); 3958 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 3959 pTable->DestroyInstance(instance, pAllocator); 3960 3961 std::lock_guard<std::mutex> lock(global_lock); 3962 // Clean up logging callback, if any 3963 while (my_data->logging_callback.size() > 0) { 3964 VkDebugReportCallbackEXT callback = my_data->logging_callback.back(); 3965 layer_destroy_msg_callback(my_data->report_data, callback, pAllocator); 3966 my_data->logging_callback.pop_back(); 3967 } 3968 3969 layer_debug_report_destroy_instance(my_data->report_data); 3970 delete my_data->instance_dispatch_table; 3971 layer_data_map.erase(key); 3972} 3973 3974static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) { 3975 uint32_t i; 3976 // TBD: Need any locking, in case this function is called at the same time 3977 // by more than one thread? 3978 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 3979 dev_data->device_extensions.wsi_enabled = false; 3980 3981 VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table; 3982 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr; 3983 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR"); 3984 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR"); 3985 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR"); 3986 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR"); 3987 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR"); 3988 3989 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) { 3990 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) 3991 dev_data->device_extensions.wsi_enabled = true; 3992 } 3993} 3994 3995// Verify that queue family has been properly requested 3996bool ValidateRequestedQueueFamilyProperties(layer_data *dev_data, const VkDeviceCreateInfo *create_info) { 3997 bool skip_call = false; 3998 // First check is that app has actually requested queueFamilyProperties 3999 if (!dev_data->physical_device_state) { 4000 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 4001 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL", 4002 "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices()."); 4003 } else if (QUERY_DETAILS != dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) { 4004 // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
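        // The recommendation amounts to this app-side sequence before vkCreateDevice()
        // (illustrative sketch only, not layer code):
        //   uint32_t count = 0;
        //   vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
        //   std::vector<VkQueueFamilyProperties> props(count);
        //   vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());
        //   // ...then pick queueFamilyIndex values from props for each
        //   // VkDeviceQueueCreateInfo handed to vkCreateDevice()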
4005 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 4006 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, 4007 "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties()."); 4008 } else { 4009 // Check that the requested queue properties are valid 4010 for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) { 4011 uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex; 4012 if (dev_data->queue_family_properties.size() <= 4013 requestedIndex) { // requested index is out of bounds for this physical device 4014 skip_call |= log_msg( 4015 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, 4016 __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL", 4017 "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex); 4018 } else if (create_info->pQueueCreateInfos[i].queueCount > 4019 dev_data->queue_family_properties[requestedIndex]->queueCount) { 4020 skip_call |= 4021 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 4022 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL", 4023 "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but " 4024 "requested queueCount is %u.", 4025 requestedIndex, dev_data->queue_family_properties[requestedIndex]->queueCount, 4026 create_info->pQueueCreateInfos[i].queueCount); 4027 } 4028 } 4029 } 4030 return skip_call; 4031} 4032 4033// Verify that features have been queried and that they are available 4034static bool ValidateRequestedFeatures(layer_data *dev_data, const VkPhysicalDeviceFeatures *requested_features) { 4035 bool skip_call = false; 4036 4037 VkBool32 *actual = reinterpret_cast<VkBool32 *>(&(dev_data->physical_device_features)); 4038 const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features); 4039 // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues 4040 // Need to provide the struct member name with the issue. To do that seems like we'll 4041 // have to loop through each struct member which should be done w/ codegen to keep in synch. 4042 uint32_t errors = 0; 4043 uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); 4044 for (uint32_t i = 0; i < total_bools; i++) { 4045 if (requested[i] > actual[i]) { 4046 // TODO: Add index to struct member name helper to be able to include a feature name 4047 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4048 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, 4049 "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, " 4050 "which is not available on this device.", 4051 i); 4052 errors++; 4053 } 4054 } 4055 if (errors && (UNCALLED == dev_data->physical_device_state->vkGetPhysicalDeviceFeaturesState)) { 4056 // If user didn't request features, notify them that they should 4057 // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error 4058 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4059 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, 4060 "DL", "You requested features that are unavailable on this device. 
You should first query feature " 4061 "availability by calling vkGetPhysicalDeviceFeatures()."); 4062 } 4063 return skip_call; 4064} 4065 4066VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, 4067 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { 4068 layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map); 4069 bool skip_call = false; 4070 4071 // Check that any requested features are available 4072 if (pCreateInfo->pEnabledFeatures) { 4073 skip_call |= ValidateRequestedFeatures(my_instance_data, pCreateInfo->pEnabledFeatures); 4074 } 4075 skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, pCreateInfo); 4076 4077 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); 4078 4079 assert(chain_info->u.pLayerInfo); 4080 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; 4081 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr; 4082 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice"); 4083 if (fpCreateDevice == NULL) { 4084 return VK_ERROR_INITIALIZATION_FAILED; 4085 } 4086 4087 // Advance the link info for the next element on the chain 4088 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; 4089 4090 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice); 4091 if (result != VK_SUCCESS) { 4092 return result; 4093 } 4094 4095 std::unique_lock<std::mutex> lock(global_lock); 4096 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map); 4097 4098 // Setup device dispatch table 4099 my_device_data->device_dispatch_table = new VkLayerDispatchTable; 4100 layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr); 4101 my_device_data->device = *pDevice; 4102 4103 my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice); 4104 createDeviceRegisterExtensions(pCreateInfo, *pDevice); 4105 // Get physical device limits for this device 4106 my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties)); 4107 uint32_t count; 4108 my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr); 4109 my_device_data->phys_dev_properties.queue_family_properties.resize(count); 4110 my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties( 4111 gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]); 4112 // TODO: device limits should make sure these are compatible 4113 if (pCreateInfo->pEnabledFeatures) { 4114 my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures; 4115 } else { 4116 memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures)); 4117 } 4118 // Store physical device mem limits into device layer_data struct 4119 my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props); 4120 lock.unlock(); 4121 4122 ValidateLayerOrdering(*pCreateInfo); 4123 4124 return result; 4125} 4126 4127// prototype 4128static void deleteRenderPasses(layer_data *); 4129VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { 4130 // TODOSC : Shouldn't need any 
customization here 4131 dispatch_key key = get_dispatch_key(device); 4132 layer_data *dev_data = get_my_data_ptr(key, layer_data_map); 4133 // Free all the memory 4134 std::unique_lock<std::mutex> lock(global_lock); 4135 deletePipelines(dev_data); 4136 deleteRenderPasses(dev_data); 4137 deleteCommandBuffers(dev_data); 4138 // This will also delete all sets in the pool & remove them from setMap 4139 deletePools(dev_data); 4140 // All sets should be removed 4141 assert(dev_data->setMap.empty()); 4142 for (auto del_layout : dev_data->descriptorSetLayoutMap) { 4143 delete del_layout.second; 4144 } 4145 dev_data->descriptorSetLayoutMap.clear(); 4146 dev_data->imageViewMap.clear(); 4147 dev_data->imageMap.clear(); 4148 dev_data->imageSubresourceMap.clear(); 4149 dev_data->imageLayoutMap.clear(); 4150 dev_data->bufferViewMap.clear(); 4151 dev_data->bufferMap.clear(); 4152 // Queues persist until device is destroyed 4153 dev_data->queueMap.clear(); 4154 lock.unlock(); 4155#if MTMERGESOURCE 4156 bool skip_call = false; 4157 lock.lock(); 4158 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 4159 (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()"); 4160 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 4161 (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================"); 4162 print_mem_list(dev_data); 4163 printCBList(dev_data); 4164 // Report any memory leaks 4165 DEVICE_MEM_INFO *pInfo = NULL; 4166 if (!dev_data->memObjMap.empty()) { 4167 for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) { 4168 pInfo = (*ii).second.get(); 4169 if (pInfo->allocInfo.allocationSize != 0) { 4170 // Valid Usage: All child objects created on device must have been destroyed prior to destroying device 4171 skip_call |= 4172 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 4173 (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM", 4174 "Mem Object 0x%" PRIx64 " has not been freed. 
You should clean up this memory by calling " 4175 "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().", 4176 (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem)); 4177 } 4178 } 4179 } 4180 layer_debug_report_destroy_device(device); 4181 lock.unlock(); 4182 4183#if DISPATCH_MAP_DEBUG 4184 fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key); 4185#endif 4186 VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table; 4187 if (!skip_call) { 4188 pDisp->DestroyDevice(device, pAllocator); 4189 } 4190#else 4191 dev_data->device_dispatch_table->DestroyDevice(device, pAllocator); 4192#endif 4193 delete dev_data->device_dispatch_table; 4194 layer_data_map.erase(key); 4195} 4196 4197static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}}; 4198 4199// This validates that the initial layout specified in the command buffer for 4200// the IMAGE is the same 4201// as the global IMAGE layout 4202static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 4203 bool skip_call = false; 4204 for (auto cb_image_data : pCB->imageLayoutMap) { 4205 VkImageLayout imageLayout; 4206 if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) { 4207 skip_call |= 4208 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 4209 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".", 4210 reinterpret_cast<const uint64_t &>(cb_image_data.first)); 4211 } else { 4212 if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) { 4213 // TODO: Set memory invalid which is in mem_tracker currently 4214 } else if (imageLayout != cb_image_data.second.initialLayout) { 4215 if (cb_image_data.first.hasSubresource) { 4216 skip_call |= log_msg( 4217 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4218 reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 4219 "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], " 4220 "with layout %s when first use is %s.", 4221 reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask, 4222 cb_image_data.first.subresource.arrayLayer, 4223 cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout), 4224 string_VkImageLayout(cb_image_data.second.initialLayout)); 4225 } else { 4226 skip_call |= log_msg( 4227 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4228 reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 4229 "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when " 4230 "first use is %s.", 4231 reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout), 4232 string_VkImageLayout(cb_image_data.second.initialLayout)); 4233 } 4234 } 4235 SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout); 4236 } 4237 } 4238 return skip_call; 4239} 4240 4241// Track which resources are in-flight by atomically incrementing their "in_use" count 4242static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) { 4243 bool skip_call = false; 4244 4245 pCB->in_use.fetch_add(1); 4246 my_data->globalInFlightCmdBuffers.insert(pCB->commandBuffer); 4247 4248 for (auto drawDataElement 
: pCB->drawData) { 4249 for (auto buffer : drawDataElement.buffers) { 4250 auto buffer_node = getBufferNode(my_data, buffer); 4251 if (!buffer_node) { 4252 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 4253 (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS", 4254 "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer)); 4255 } else { 4256 buffer_node->in_use.fetch_add(1); 4257 } 4258 } 4259 } 4260 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 4261 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 4262 if (!my_data->setMap.count(set->GetSet())) { 4263 skip_call |= 4264 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 4265 (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS", 4266 "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set)); 4267 } else { 4268 set->in_use.fetch_add(1); 4269 } 4270 } 4271 } 4272 for (auto event : pCB->events) { 4273 auto eventNode = my_data->eventMap.find(event); 4274 if (eventNode == my_data->eventMap.end()) { 4275 skip_call |= 4276 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 4277 reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS", 4278 "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event)); 4279 } else { 4280 eventNode->second.in_use.fetch_add(1); 4281 } 4282 } 4283 for (auto event : pCB->writeEventsBeforeWait) { 4284 auto eventNode = my_data->eventMap.find(event); 4285 eventNode->second.write_in_use++; 4286 } 4287 return skip_call; 4288} 4289 4290// Note: This function assumes that the global lock is held by the calling 4291// thread. 
4292static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) { 4293 bool skip_call = false; 4294 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer); 4295 if (pCB) { 4296 for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) { 4297 for (auto event : queryEventsPair.second) { 4298 if (my_data->eventMap[event].needsSignaled) { 4299 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4300 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS", 4301 "Cannot get query results on queryPool 0x%" PRIx64 4302 " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".", 4303 (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event)); 4304 } 4305 } 4306 } 4307 } 4308 return skip_call; 4309} 4310// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers 4311static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) { 4312 // Pull it off of global list initially, but if we find it in any other queue list, add it back in 4313 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer); 4314 pCB->in_use.fetch_sub(1); 4315 if (!pCB->in_use.load()) { 4316 dev_data->globalInFlightCmdBuffers.erase(cmd_buffer); 4317 } 4318} 4319 4320static void decrementResources(layer_data *my_data, CB_SUBMISSION *submission) { 4321 for (auto cb : submission->cbs) { 4322 auto pCB = getCBNode(my_data, cb); 4323 for (auto drawDataElement : pCB->drawData) { 4324 for (auto buffer : drawDataElement.buffers) { 4325 auto buffer_node = getBufferNode(my_data, buffer); 4326 if (buffer_node) { 4327 buffer_node->in_use.fetch_sub(1); 4328 } 4329 } 4330 } 4331 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 4332 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 4333 set->in_use.fetch_sub(1); 4334 } 4335 } 4336 for (auto event : pCB->events) { 4337 auto eventNode = my_data->eventMap.find(event); 4338 if (eventNode != my_data->eventMap.end()) { 4339 eventNode->second.in_use.fetch_sub(1); 4340 } 4341 } 4342 for (auto event : pCB->writeEventsBeforeWait) { 4343 auto eventNode = my_data->eventMap.find(event); 4344 if (eventNode != my_data->eventMap.end()) { 4345 eventNode->second.write_in_use--; 4346 } 4347 } 4348 for (auto queryStatePair : pCB->queryToStateMap) { 4349 my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second; 4350 } 4351 for (auto eventStagePair : pCB->eventToStageMap) { 4352 my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second; 4353 } 4354 } 4355 4356 for (auto semaphore : submission->semaphores) { 4357 auto pSemaphore = getSemaphoreNode(my_data, semaphore); 4358 if (pSemaphore) { 4359 pSemaphore->in_use.fetch_sub(1); 4360 } 4361 } 4362} 4363// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call 4364// decrementResources for all priorFences and cmdBuffers associated with fence. 
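// E.g. (hypothetical handles): if fenceB was submitted on a queue after fenceA,
// then fenceB.priorFences contains fenceA, and retiring fenceB below also
// retires fenceA, releasing the in_use counts of every buffer, descriptor set
// and event referenced by either fence's submissions.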
4365static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) { 4366 bool skip_call = false; 4367 std::vector<std::pair<VkFence, FENCE_NODE *>> fence_pairs; 4368 for (uint32_t i = 0; i < fenceCount; ++i) { 4369 auto pFence = getFenceNode(my_data, pFences[i]); 4370 if (!pFence || pFence->state != FENCE_INFLIGHT) 4371 continue; 4372 4373 fence_pairs.emplace_back(pFences[i], pFence); 4374 pFence->state = FENCE_RETIRED; 4375 4376 decrementResources(my_data, static_cast<uint32_t>(pFence->priorFences.size()), 4377 pFence->priorFences.data()); 4378 for (auto & submission : pFence->submissions) { 4379 decrementResources(my_data, &submission); 4380 for (auto cb : submission.cbs) { 4381 skip_call |= cleanInFlightCmdBuffer(my_data, cb); 4382 removeInFlightCmdBuffer(my_data, cb); 4383 } 4384 } 4385 pFence->submissions.clear(); 4386 pFence->priorFences.clear(); 4387 } 4388 for (auto fence_pair : fence_pairs) { 4389 for (auto queue : fence_pair.second->queues) { 4390 auto pQueue = getQueueNode(my_data, queue); 4391 if (pQueue) { 4392 auto last_fence_data = 4393 std::find(pQueue->lastFences.begin(), pQueue->lastFences.end(), fence_pair.first); 4394 if (last_fence_data != pQueue->lastFences.end()) 4395 pQueue->lastFences.erase(last_fence_data); 4396 } 4397 } 4398 for (auto& fence_data : my_data->fenceMap) { 4399 auto prior_fence_data = 4400 std::find(fence_data.second.priorFences.begin(), fence_data.second.priorFences.end(), fence_pair.first); 4401 if (prior_fence_data != fence_data.second.priorFences.end()) 4402 fence_data.second.priorFences.erase(prior_fence_data); 4403 } 4404 } 4405 return skip_call; 4406} 4407// Decrement in_use for all outstanding cmd buffers that were submitted on this queue 4408static bool decrementResources(layer_data *my_data, VkQueue queue) { 4409 bool skip_call = false; 4410 auto queue_data = my_data->queueMap.find(queue); 4411 if (queue_data != my_data->queueMap.end()) { 4412 for (auto & submission : queue_data->second.untrackedSubmissions) { 4413 decrementResources(my_data, &submission); 4414 for (auto cb : submission.cbs) { 4415 skip_call |= cleanInFlightCmdBuffer(my_data, cb); 4416 removeInFlightCmdBuffer(my_data, cb); 4417 } 4418 } 4419 queue_data->second.untrackedSubmissions.clear(); 4420 skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()), 4421 queue_data->second.lastFences.data()); 4422 } 4423 return skip_call; 4424} 4425 4426// This function merges command buffer tracking between queues when there is a semaphore dependency 4427// between them (see below for details as to how tracking works). When this happens, the prior 4428// fences from the signaling queue are merged into the wait queue as well as any untracked command 4429// buffers. 4430static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) { 4431 if (queue == other_queue) { 4432 return; 4433 } 4434 auto pQueue = getQueueNode(dev_data, queue); 4435 auto pOtherQueue = getQueueNode(dev_data, other_queue); 4436 if (!pQueue || !pOtherQueue) { 4437 return; 4438 } 4439 for (auto fenceInner : pOtherQueue->lastFences) { 4440 pQueue->lastFences.push_back(fenceInner); 4441 auto pFenceInner = getFenceNode(dev_data, fenceInner); 4442 if (pFenceInner) 4443 pFenceInner->queues.insert(other_queue); 4444 } 4445 // TODO: Stealing the untracked CBs out of the signaling queue isn't really 4446 // correct. 
A subsequent submission + wait, or a QWI on that queue, or 4447 // another semaphore dependency to a third queue may /all/ provide 4448 // suitable proof that the work we're stealing here has completed on the 4449 // device, but we've lost that information by moving the tracking between 4450 // queues. 4451 auto pFence = getFenceNode(dev_data, fence); 4452 if (pFence) { 4453 for (auto submission : pOtherQueue->untrackedSubmissions) { 4454 pFence->submissions.push_back(submission); 4455 } 4456 pOtherQueue->untrackedSubmissions.clear(); 4457 } else { 4458 for (auto submission : pOtherQueue->untrackedSubmissions) { 4459 pQueue->untrackedSubmissions.push_back(submission); 4460 } 4461 pOtherQueue->untrackedSubmissions.clear(); 4462 } 4463 for (auto eventStagePair : pOtherQueue->eventToStageMap) { 4464 pQueue->eventToStageMap[eventStagePair.first] = eventStagePair.second; 4465 } 4466 for (auto queryStatePair : pOtherQueue->queryToStateMap) { 4467 pQueue->queryToStateMap[queryStatePair.first] = queryStatePair.second; 4468 } 4469} 4470 4471// This is the core function for tracking command buffers. There are two primary ways command 4472// buffers are tracked. When submitted they are stored in the command buffer list associated 4473// with a fence or the untracked command buffer list associated with a queue if no fence is used. 4474// Each queue also stores the last fence that was submitted onto the queue. This allows us to 4475// create a linked list of fences and their associated command buffers so if one fence is 4476// waited on, prior fences on that queue are also considered to have been waited on. When a fence is 4477// waited on (either via a queue, device or fence), we free the cmd buffers for that fence and 4478// recursively call with the prior fences. 4479 4480 4481// Submit a fence to a queue, delimiting previous fences and previous untracked 4482// work by it. 
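//
// Illustrative timeline (hypothetical handles q, s0, s1, fenceA):
//   vkQueueSubmit(q, 1, &s0, VK_NULL_HANDLE); // s0 -> pQueue->untrackedSubmissions
//   vkQueueSubmit(q, 1, &s1, fenceA);         // SubmitFence() swaps s0 into
//                                             // fenceA->submissions; QueueSubmit()
//                                             // then appends s1, so waiting on
//                                             // fenceA retires both s0 and s1.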
4483static void 4484SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence) 4485{ 4486 assert(!pFence->priorFences.size()); 4487 assert(!pFence->submissions.size()); 4488 4489 std::swap(pFence->priorFences, pQueue->lastFences); 4490 std::swap(pFence->submissions, pQueue->untrackedSubmissions); 4491 4492 pFence->queues.insert(pQueue->queue); 4493 pFence->state = FENCE_INFLIGHT; 4494 4495 pQueue->lastFences.push_back(pFence->fence); 4496} 4497 4498static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 4499 bool skip_call = false; 4500 if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) && 4501 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { 4502 skip_call |= 4503 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 4504 __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS", 4505 "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.", 4506 reinterpret_cast<uint64_t>(pCB->commandBuffer)); 4507 } 4508 return skip_call; 4509} 4510 4511static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 4512 bool skip_call = false; 4513 // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once 4514 if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) { 4515 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4516 0, __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS", 4517 "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT " 4518 "set, but has been submitted 0x%" PRIxLEAST64 " times.", 4519 (uint64_t)(pCB->commandBuffer), pCB->submitCount); 4520 } 4521 // Validate that cmd buffers have been updated 4522 if (CB_RECORDED != pCB->state) { 4523 if (CB_INVALID == pCB->state) { 4524 // Inform app of reason CB invalid 4525 for (auto obj : pCB->broken_bindings) { 4526 const char *type_str = object_type_to_string(obj.type); 4527 // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB 4528 const char *cause_str = 4529 (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ?
"destroyed or updated" : "destroyed"; 4530 4531 skip_call |= 4532 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4533 reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4534 "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64 4535 " was %s.", 4536 reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str); 4537 } 4538 } else { // Flag error for using CB w/o vkEndCommandBuffer() called 4539 skip_call |= 4540 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4541 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS", 4542 "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!", 4543 (uint64_t)(pCB->commandBuffer)); 4544 } 4545 } 4546 return skip_call; 4547} 4548 4549static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 4550 // Track in-use for resources off of primary and any secondary CBs 4551 bool skip_call = false; 4552 4553 // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing 4554 // on device 4555 skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB); 4556 4557 skip_call |= validateAndIncrementResources(dev_data, pCB); 4558 4559 if (!pCB->secondaryCommandBuffers.empty()) { 4560 for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) { 4561 GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer); 4562 skip_call |= validateAndIncrementResources(dev_data, pSubCB); 4563 if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) && 4564 !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { 4565 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 4566 __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS", 4567 "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64 4568 " but that buffer has subsequently been bound to " 4569 "primary cmd buffer 0x%" PRIxLEAST64 4570 " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", 4571 reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer), 4572 reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer)); 4573 } 4574 } 4575 } 4576 4577 skip_call |= validateCommandBufferState(dev_data, pCB); 4578 4579 return skip_call; 4580} 4581 4582static bool 4583ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) 4584{ 4585 bool skip_call = false; 4586 4587 if (pFence) { 4588 if (pFence->state == FENCE_INFLIGHT) { 4589 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 4590 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", 4591 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence)); 4592 } 4593 4594 else if (pFence->state == FENCE_RETIRED) { 4595 skip_call |= 4596 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 4597 reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM", 4598 "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. 
Fences must be reset before being submitted", 4599 reinterpret_cast<uint64_t &>(pFence->fence)); 4600 } 4601 } 4602 4603 return skip_call; 4604} 4605 4606 4607VKAPI_ATTR VkResult VKAPI_CALL 4608QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) { 4609 bool skip_call = false; 4610 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 4611 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 4612 std::unique_lock<std::mutex> lock(global_lock); 4613 4614 auto pQueue = getQueueNode(dev_data, queue); 4615 auto pFence = getFenceNode(dev_data, fence); 4616 skip_call |= ValidateFenceForSubmit(dev_data, pFence); 4617 4618 if (skip_call) { 4619 return VK_ERROR_VALIDATION_FAILED_EXT; 4620 } 4621 4622 // TODO : Review these old print functions and clean up as appropriate 4623 print_mem_list(dev_data); 4624 printCBList(dev_data); 4625 4626 // Mark the fence in-use. 4627 if (pFence) { 4628 SubmitFence(pQueue, pFence); 4629 } 4630 4631 // If a fence is supplied, all the command buffers for this call will be 4632 // delimited by that fence. Otherwise, they go in the untracked portion of 4633 // the queue, and may end up being delimited by a fence supplied in a 4634 // subsequent submission. 4635 auto & submitTarget = pFence ? pFence->submissions : pQueue->untrackedSubmissions; 4636 4637 // Now verify each individual submit 4638 std::unordered_set<VkQueue> processed_other_queues; 4639 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 4640 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 4641 vector<VkSemaphore> semaphoreList; 4642 for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { 4643 VkSemaphore semaphore = submit->pWaitSemaphores[i]; 4644 auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 4645 semaphoreList.push_back(semaphore); 4646 if (pSemaphore) { 4647 if (pSemaphore->signaled) { 4648 pSemaphore->signaled = false; 4649 pSemaphore->in_use.fetch_add(1); 4650 } else { 4651 skip_call |= 4652 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 4653 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 4654 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", 4655 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore)); 4656 } 4657 VkQueue other_queue = pSemaphore->queue; 4658 if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) { 4659 updateTrackedCommandBuffers(dev_data, queue, other_queue, fence); 4660 processed_other_queues.insert(other_queue); 4661 } 4662 } 4663 } 4664 for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { 4665 VkSemaphore semaphore = submit->pSignalSemaphores[i]; 4666 auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 4667 if (pSemaphore) { 4668 semaphoreList.push_back(semaphore); 4669 if (pSemaphore->signaled) { 4670 skip_call |= 4671 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 4672 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 4673 "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64 4674 " that has already been signaled but not waited on by queue 0x%" PRIx64 ".", 4675 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore), 4676 reinterpret_cast<uint64_t &>(pSemaphore->queue)); 4677 } else { 4678 pSemaphore->signaled = true; 4679 
pSemaphore->queue = queue; 4680 pSemaphore->in_use.fetch_add(1); 4681 } 4682 } 4683 } 4684 4685 std::vector<VkCommandBuffer> cbs; 4686 4687 for (uint32_t i = 0; i < submit->commandBufferCount; i++) { 4688 auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]); 4689 skip_call |= ValidateCmdBufImageLayouts(dev_data, pCBNode); 4690 if (pCBNode) { 4691 cbs.push_back(submit->pCommandBuffers[i]); 4692 for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) { 4693 cbs.push_back(secondaryCmdBuffer); 4694 } 4695 4696 pCBNode->submitCount++; // increment submit count 4697 skip_call |= validatePrimaryCommandBufferState(dev_data, pCBNode); 4698 // Call submit-time functions to validate/update state 4699 for (auto &function : pCBNode->validate_functions) { 4700 skip_call |= function(); 4701 } 4702 for (auto &function : pCBNode->eventUpdates) { 4703 skip_call |= function(queue); 4704 } 4705 for (auto &function : pCBNode->queryUpdates) { 4706 skip_call |= function(queue); 4707 } 4708 } 4709 } 4710 4711 submitTarget.emplace_back(cbs, semaphoreList); 4712 } 4713 lock.unlock(); 4714 if (!skip_call) 4715 result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence); 4716 4717 return result; 4718} 4719 4720VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, 4721 const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) { 4722 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4723 VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory); 4724 // TODO : Track allocations and overall size here 4725 std::lock_guard<std::mutex> lock(global_lock); 4726 add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo); 4727 print_mem_list(my_data); 4728 return result; 4729} 4730 4731VKAPI_ATTR void VKAPI_CALL 4732FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) { 4733 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4734 4735 // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed. 4736 // Before freeing a memory object, an application must ensure the memory object is no longer 4737 // in use by the device—for example by command buffers queued for execution. The memory need 4738 // not yet be unbound from all images and buffers, but any further use of those images or 4739 // buffers (on host or device) for anything other than destroying those objects will result in 4740 // undefined behavior. 
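    // A conforming teardown therefore looks like (illustrative only):
    //   vkDeviceWaitIdle(device);               // or wait on the relevant fences
    //   vkDestroyBuffer(device, buffer, nullptr);
    //   vkFreeMemory(device, memory, nullptr);  // memory may still be bound here
    // The binding itself never needs to be "undone" before the free.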
4741 4742 std::unique_lock<std::mutex> lock(global_lock); 4743 freeMemObjInfo(my_data, device, mem, false); 4744 print_mem_list(my_data); 4745 printCBList(my_data); 4746 lock.unlock(); 4747 my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator); 4748} 4749 4750static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { 4751 bool skip_call = false; 4752 4753 if (size == 0) { 4754 skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 4755 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 4756 "VkMapMemory: Attempting to map memory range of size zero"); 4757 } 4758 4759 auto mem_element = my_data->memObjMap.find(mem); 4760 if (mem_element != my_data->memObjMap.end()) { 4761 auto mem_info = mem_element->second.get(); 4762 // It is an application error to call VkMapMemory on an object that is already mapped 4763 if (mem_info->memRange.size != 0) { 4764 skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 4765 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 4766 "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem); 4767 } 4768 4769 // Validate that offset + size is within object's allocationSize 4770 if (size == VK_WHOLE_SIZE) { 4771 if (offset >= mem_info->allocInfo.allocationSize) { 4772 skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4773 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, 4774 "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 4775 " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64, 4776 offset, mem_info->allocInfo.allocationSize, mem_info->allocInfo.allocationSize); 4777 } 4778 } else { 4779 if ((offset + size) > mem_info->allocInfo.allocationSize) { 4780 skip_call = 4781 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 4782 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 4783 "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset, 4784 size + offset, mem_info->allocInfo.allocationSize); 4785 } 4786 } 4787 } 4788 return skip_call; 4789} 4790 4791static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { 4792 auto mem_info = getMemObjInfo(my_data, mem); 4793 if (mem_info) { 4794 mem_info->memRange.offset = offset; 4795 mem_info->memRange.size = size; 4796 } 4797} 4798 4799static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) { 4800 bool skip_call = false; 4801 auto mem_info = getMemObjInfo(my_data, mem); 4802 if (mem_info) { 4803 if (!mem_info->memRange.size) { 4804 // Valid Usage: memory must currently be mapped 4805 skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 4806 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 4807 "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem); 4808 } 4809 mem_info->memRange.size = 0; 4810 if (mem_info->pData) { 4811 free(mem_info->pData); 4812 mem_info->pData = 0; 4813 } 4814 } 4815 return skip_call; 4816} 4817 4818static char NoncoherentMemoryFillValue = 0xb; 4819 4820static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) { 4821 auto mem_info = 
getMemObjInfo(dev_data, mem); 4822 if (mem_info) { 4823 mem_info->pDriverData = *ppData; 4824 uint32_t index = mem_info->allocInfo.memoryTypeIndex; 4825 if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) { 4826 mem_info->pData = 0; 4827 } else { 4828 if (size == VK_WHOLE_SIZE) { 4829 size = mem_info->allocInfo.allocationSize; 4830 } 4831 size_t convSize = (size_t)(size); 4832 mem_info->pData = malloc(2 * convSize); 4833 memset(mem_info->pData, NoncoherentMemoryFillValue, 2 * convSize); 4834 *ppData = static_cast<char *>(mem_info->pData) + (convSize / 2); 4835 } 4836 } 4837} 4838// Verify that state for fence being waited on is appropriate. That is, 4839// a fence being waited on should not already be signalled and 4840// it should have been submitted on a queue or during acquire next image 4841static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) { 4842 bool skip_call = false; 4843 4844 auto pFence = getFenceNode(dev_data, fence); 4845 if (pFence) { 4846 if (pFence->state == FENCE_UNSIGNALED) { 4847 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 4848 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM", 4849 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during " 4850 "acquire next image.", 4851 apiCall, reinterpret_cast<uint64_t &>(fence)); 4852 } 4853 } 4854 return skip_call; 4855} 4856 4857VKAPI_ATTR VkResult VKAPI_CALL 4858WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) { 4859 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4860 bool skip_call = false; 4861 // Verify fence status of submitted fences 4862 std::unique_lock<std::mutex> lock(global_lock); 4863 for (uint32_t i = 0; i < fenceCount; i++) { 4864 skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences"); 4865 } 4866 lock.unlock(); 4867 if (skip_call) 4868 return VK_ERROR_VALIDATION_FAILED_EXT; 4869 4870 VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout); 4871 4872 if (result == VK_SUCCESS) { 4873 lock.lock(); 4874 // When we know that all fences are complete we can clean/remove their CBs 4875 if (waitAll || fenceCount == 1) { 4876 skip_call |= decrementResources(dev_data, fenceCount, pFences); 4877 } 4878 // NOTE : Alternate case not handled here is when some fences have completed. In 4879 // this case for app to guarantee which fences completed it will have to call 4880 // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete. 
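        // E.g. with waitAll == VK_FALSE and fenceCount > 1, VK_SUCCESS only proves
        // that at least one fence signaled, so nothing is retired above. A call
        // such as (illustrative):
        //   vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
        // satisfies the (waitAll || fenceCount == 1) test and lets
        // decrementResources() release the fence's command buffers.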
4881 lock.unlock(); 4882 } 4883 if (skip_call) 4884 return VK_ERROR_VALIDATION_FAILED_EXT; 4885 return result; 4886} 4887 4888VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) { 4889 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4890 bool skip_call = false; 4891 std::unique_lock<std::mutex> lock(global_lock); 4892 skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus"); 4893 lock.unlock(); 4894 4895 if (skip_call) 4896 return VK_ERROR_VALIDATION_FAILED_EXT; 4897 4898 VkResult result = dev_data->device_dispatch_table->GetFenceStatus(device, fence); 4899 lock.lock(); 4900 if (result == VK_SUCCESS) { 4901 skip_call |= decrementResources(dev_data, 1, &fence); 4902 } 4903 lock.unlock(); 4904 if (skip_call) 4905 return VK_ERROR_VALIDATION_FAILED_EXT; 4906 return result; 4907} 4908 4909VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, 4910 VkQueue *pQueue) { 4911 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4912 dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue); 4913 std::lock_guard<std::mutex> lock(global_lock); 4914 4915 // Add queue to tracking set only if it is new 4916 auto result = dev_data->queues.emplace(*pQueue); 4917 if (result.second == true) { 4918 QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue]; 4919 pQNode->queue = *pQueue; 4920 pQNode->device = device; 4921 } 4922} 4923 4924VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) { 4925 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 4926 bool skip_call = false; 4927 skip_call |= decrementResources(dev_data, queue); 4928 if (skip_call) 4929 return VK_ERROR_VALIDATION_FAILED_EXT; 4930 VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue); 4931 return result; 4932} 4933 4934VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) { 4935 bool skip_call = false; 4936 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4937 std::unique_lock<std::mutex> lock(global_lock); 4938 for (auto queue : dev_data->queues) { 4939 skip_call |= decrementResources(dev_data, queue); 4940 } 4941 dev_data->globalInFlightCmdBuffers.clear(); 4942 lock.unlock(); 4943 if (skip_call) 4944 return VK_ERROR_VALIDATION_FAILED_EXT; 4945 VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device); 4946 return result; 4947} 4948 4949VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) { 4950 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4951 bool skip_call = false; 4952 std::unique_lock<std::mutex> lock(global_lock); 4953 auto fence_pair = dev_data->fenceMap.find(fence); 4954 if (fence_pair != dev_data->fenceMap.end()) { 4955 if (fence_pair->second.state == FENCE_INFLIGHT) { 4956 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 4957 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.", 4958 (uint64_t)(fence)); 4959 } 4960 dev_data->fenceMap.erase(fence_pair); 4961 } 4962 lock.unlock(); 4963 4964 if (!skip_call) 4965 dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator); 4966} 4967 4968VKAPI_ATTR void VKAPI_CALL 4969DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) { 4970 layer_data 
*dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4971 dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator); 4972 std::lock_guard<std::mutex> lock(global_lock); 4973 auto item = dev_data->semaphoreMap.find(semaphore); 4974 if (item != dev_data->semaphoreMap.end()) { 4975 if (item->second.in_use.load()) { 4976 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 4977 reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS", 4978 "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore)); 4979 } 4980 dev_data->semaphoreMap.erase(semaphore); 4981 } 4982 // TODO : Clean up any internal data structures using this obj. 4983} 4984 4985VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) { 4986 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4987 bool skip_call = false; 4988 std::unique_lock<std::mutex> lock(global_lock); 4989 auto event_data = dev_data->eventMap.find(event); 4990 if (event_data != dev_data->eventMap.end()) { 4991 if (event_data->second.in_use.load()) { 4992 skip_call |= log_msg( 4993 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 4994 reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS", 4995 "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event)); 4996 } 4997 dev_data->eventMap.erase(event_data); 4998 } 4999 lock.unlock(); 5000 if (!skip_call) 5001 dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator); 5002 // TODO : Clean up any internal data structures using this obj. 5003} 5004 5005VKAPI_ATTR void VKAPI_CALL 5006DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) { 5007 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5008 ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator); 5009 // TODO : Clean up any internal data structures using this obj. 
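    // A minimal sketch of that cleanup (hypothetical; note this function does not
    // even look up its layer_data yet):
    //   std::lock_guard<std::mutex> lock(global_lock);
    //   dev_data->queryPoolMap.erase(queryPool);
    //   // ...plus erasing any queryToStateMap entries whose QueryObject::pool
    //   // matches queryPool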
5010} 5011 5012VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, 5013 uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, 5014 VkQueryResultFlags flags) { 5015 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5016 unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight; 5017 std::unique_lock<std::mutex> lock(global_lock); 5018 for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) { 5019 auto pCB = getCBNode(dev_data, cmdBuffer); 5020 for (auto queryStatePair : pCB->queryToStateMap) { 5021 queriesInFlight[queryStatePair.first].push_back(cmdBuffer); 5022 } 5023 } 5024 bool skip_call = false; 5025 for (uint32_t i = 0; i < queryCount; ++i) { 5026 QueryObject query = {queryPool, firstQuery + i}; 5027 auto queryElement = queriesInFlight.find(query); 5028 auto queryToStateElement = dev_data->queryToStateMap.find(query); 5029 if (queryToStateElement != dev_data->queryToStateMap.end()) { 5030 // Available and in flight 5031 if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() && 5032 queryToStateElement->second) { 5033 for (auto cmdBuffer : queryElement->second) { 5034 auto pCB = getCBNode(dev_data, cmdBuffer); 5035 auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query); 5036 if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) { 5037 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5038 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5039 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.", 5040 (uint64_t)(queryPool), firstQuery + i); 5041 } else { 5042 for (auto event : queryEventElement->second) { 5043 dev_data->eventMap[event].needsSignaled = true; 5044 } 5045 } 5046 } 5047 // Unavailable and in flight 5048 } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() && 5049 !queryToStateElement->second) { 5050 // TODO : Can there be the same query in use by multiple command buffers in flight? 
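                // (The code below allows for it: queriesInFlight maps one QueryObject to
                // a vector of command buffers and ORs their per-CB availability.) An app
                // that wants the driver to block until results are ready would pass, e.g.
                // (illustrative):
                //   vkGetQueryPoolResults(device, queryPool, firstQuery, queryCount,
                //                         dataSize, pData, stride,
                //                         flags | VK_QUERY_RESULT_WAIT_BIT);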
5051 bool make_available = false; 5052 for (auto cmdBuffer : queryElement->second) { 5053 auto pCB = getCBNode(dev_data, cmdBuffer); 5054 make_available |= pCB->queryToStateMap[query]; 5055 } 5056 if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) { 5057 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5058 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5059 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.", 5060 (uint64_t)(queryPool), firstQuery + i); 5061 } 5062 // Unavailable 5063 } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) { 5064 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5065 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5066 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.", 5067 (uint64_t)(queryPool), firstQuery + i); 5068 // Uninitialized 5069 } else if (queryToStateElement == dev_data->queryToStateMap.end()) { 5070 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5071 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5072 "Cannot get query results on queryPool 0x%" PRIx64 5073 " with index %d as data has not been collected for this index.", 5074 (uint64_t)(queryPool), firstQuery + i); 5075 } 5076 } 5077 } 5078 lock.unlock(); 5079 if (skip_call) 5080 return VK_ERROR_VALIDATION_FAILED_EXT; 5081 return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, 5082 flags); 5083} 5084 5085static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) { 5086 bool skip_call = false; 5087 auto buffer_node = getBufferNode(my_data, buffer); 5088 if (!buffer_node) { 5089 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 5090 (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS", 5091 "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer)); 5092 } else { 5093 if (buffer_node->in_use.load()) { 5094 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 5095 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS", 5096 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer)); 5097 } 5098 } 5099 return skip_call; 5100} 5101 5102static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle, 5103 VkDebugReportObjectTypeEXT object_type) { 5104 if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) { 5105 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0, 5106 MEMTRACK_INVALID_ALIASING, "MEM", "Buffer 0x%" PRIx64 " is aliased with image 0x%" PRIx64, object_handle, 5107 other_handle); 5108 } else { 5109 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0, 5110 MEMTRACK_INVALID_ALIASING, "MEM", "Image 0x%" PRIx64 " is aliased with buffer 0x%" PRIx64, object_handle, 5111 other_handle); 5112 } 5113} 5114 5115static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range, 5116 VkDebugReportObjectTypeEXT object_type) { 5117 bool
skip_call = false; 5118 5119 for (auto range : ranges) { 5120 if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) < 5121 (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1))) 5122 continue; 5123 if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) > 5124 (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1))) 5125 continue; 5126 skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type); 5127 } 5128 return skip_call; 5129} 5130 5131static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset, 5132 VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) { 5133 MEMORY_RANGE range; 5134 range.handle = handle; 5135 range.memory = mem; 5136 range.start = memoryOffset; 5137 range.end = memoryOffset + memRequirements.size - 1; 5138 ranges.push_back(range); 5139 return range; 5140} 5141 5142static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) { 5143 for (uint32_t item = 0; item < ranges.size(); item++) { 5144 if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) { 5145 ranges.erase(ranges.begin() + item); 5146 break; 5147 } 5148 } 5149} 5150 5151VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, 5152 const VkAllocationCallbacks *pAllocator) { 5153 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5154 std::unique_lock<std::mutex> lock(global_lock); 5155 if (!validateIdleBuffer(dev_data, buffer)) { 5156 lock.unlock(); 5157 dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator); 5158 lock.lock(); 5159 } 5160 // Clean up memory binding and range information for buffer 5161 auto buff_node = getBufferNode(dev_data, buffer); 5162 if (buff_node) { 5163 // Any bound cmd buffers are now invalid 5164 invalidateCommandBuffers(buff_node->cb_bindings, 5165 {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT}); 5166 auto mem_info = getMemObjInfo(dev_data, buff_node->mem); 5167 if (mem_info) { 5168 remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), buff_node->mem, mem_info->bufferRanges); 5169 } 5170 clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT); 5171 dev_data->bufferMap.erase(buff_node->buffer); 5172 } 5173} 5174 5175VKAPI_ATTR void VKAPI_CALL 5176DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) { 5177 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5178 dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator); 5179 std::lock_guard<std::mutex> lock(global_lock); 5180 auto item = dev_data->bufferViewMap.find(bufferView); 5181 if (item != dev_data->bufferViewMap.end()) { 5182 dev_data->bufferViewMap.erase(item); 5183 } 5184} 5185 5186VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) { 5187 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5188 // TODO : Flag error if image is use by in-flight command buffer 5189 dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator); 5190 5191 std::lock_guard<std::mutex> lock(global_lock); 5192 auto img_node = getImageNode(dev_data, image); 5193 if (img_node) 
{ 5194 // Any bound cmd buffers are now invalid 5195 invalidateCommandBuffers(img_node->cb_bindings, 5196 {reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT}); 5197 // Clean up memory mapping, bindings and range references for image 5198 auto mem_info = getMemObjInfo(dev_data, img_node->mem); 5199 if (mem_info) { 5200 remove_memory_ranges(reinterpret_cast<uint64_t &>(image), img_node->mem, mem_info->imageRanges); 5201 clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT); 5202 mem_info->image = VK_NULL_HANDLE; 5203 } 5204 // Remove image from imageMap 5205 dev_data->imageMap.erase(img_node->image); 5206 } 5207 const auto& subEntry = dev_data->imageSubresourceMap.find(image); 5208 if (subEntry != dev_data->imageSubresourceMap.end()) { 5209 for (const auto& pair : subEntry->second) { 5210 dev_data->imageLayoutMap.erase(pair); 5211 } 5212 dev_data->imageSubresourceMap.erase(subEntry); 5213 } 5214} 5215 5216static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits, 5217 const char *funcName) { 5218 bool skip_call = false; 5219 if (((1 << mem_info->allocInfo.memoryTypeIndex) & memory_type_bits) == 0) { 5220 skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5221 reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT", 5222 "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory " 5223 "type (0x%X) of this memory object 0x%" PRIx64 ".", 5224 funcName, memory_type_bits, mem_info->allocInfo.memoryTypeIndex, 5225 reinterpret_cast<const uint64_t &>(mem_info->mem)); 5226 } 5227 return skip_call; 5228} 5229 5230VKAPI_ATTR VkResult VKAPI_CALL 5231BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) { 5232 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5233 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 5234 std::unique_lock<std::mutex> lock(global_lock); 5235 // Track objects tied to memory 5236 uint64_t buffer_handle = (uint64_t)(buffer); 5237 bool skip_call = set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory"); 5238 auto buffer_node = getBufferNode(dev_data, buffer); 5239 if (buffer_node) { 5240 VkMemoryRequirements memRequirements; 5241 dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements); 5242 buffer_node->mem = mem; 5243 buffer_node->memOffset = memoryOffset; 5244 buffer_node->memSize = memRequirements.size; 5245 5246 // Track and validate bound memory range information 5247 auto mem_info = getMemObjInfo(dev_data, mem); 5248 if (mem_info) { 5249 const MEMORY_RANGE range = 5250 insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, mem_info->bufferRanges); 5251 skip_call |= validate_memory_range(dev_data, mem_info->imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT); 5252 skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory"); 5253 } 5254 5255 // Validate memory requirements alignment 5256 if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) { 5257 skip_call |= 5258 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, 5259 __LINE__, 
DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS", 5260 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the " 5261 "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 5262 ", returned from a call to vkGetBufferMemoryRequirements with buffer", 5263 memoryOffset, memRequirements.alignment); 5264 } 5265 // Validate device limits alignments 5266 VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage; 5267 if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) { 5268 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) { 5269 skip_call |= 5270 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5271 0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS", 5272 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of " 5273 "device limit minTexelBufferOffsetAlignment 0x%" PRIxLEAST64, 5274 memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment); 5275 } 5276 } 5277 if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) { 5278 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 5279 0) { 5280 skip_call |= 5281 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5282 0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS", 5283 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of " 5284 "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64, 5285 memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment); 5286 } 5287 } 5288 if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) { 5289 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 5290 0) { 5291 skip_call |= 5292 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5293 0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS", 5294 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of " 5295 "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64, 5296 memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment); 5297 } 5298 } 5299 } 5300 print_mem_list(dev_data); 5301 lock.unlock(); 5302 if (!skip_call) { 5303 result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset); 5304 } 5305 return result; 5306} 5307 5308VKAPI_ATTR void VKAPI_CALL 5309GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) { 5310 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5311 // TODO : What to track here? 5312 // Could potentially save returned mem requirements and validate values passed into BindBufferMemory 5313 my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements); 5314} 5315 5316VKAPI_ATTR void VKAPI_CALL 5317GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) { 5318 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5319 // TODO : What to track here? 
5320 // Could potentially save returned mem requirements and validate values passed into BindImageMemory 5321 my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements); 5322} 5323 5324VKAPI_ATTR void VKAPI_CALL 5325DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) { 5326 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5327 ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator); 5328 // TODO : Clean up any internal data structures using this obj. 5329} 5330 5331VKAPI_ATTR void VKAPI_CALL 5332DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) { 5333 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5334 5335 std::unique_lock<std::mutex> lock(global_lock); 5336 my_data->shaderModuleMap.erase(shaderModule); 5337 lock.unlock(); 5338 5339 my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator); 5340} 5341 5342VKAPI_ATTR void VKAPI_CALL 5343DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) { 5344 get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator); 5345 // TODO : Clean up any internal data structures using this obj. 5346} 5347 5348VKAPI_ATTR void VKAPI_CALL 5349DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) { 5350 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5351 ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator); 5352 // TODO : Clean up any internal data structures using this obj. 5353} 5354 5355VKAPI_ATTR void VKAPI_CALL 5356DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) { 5357 get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator); 5358 // TODO : Clean up any internal data structures using this obj. 5359} 5360 5361VKAPI_ATTR void VKAPI_CALL 5362DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) { 5363 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5364 ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator); 5365 // TODO : Clean up any internal data structures using this obj. 5366} 5367 5368VKAPI_ATTR void VKAPI_CALL 5369DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) { 5370 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5371 ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator); 5372 // TODO : Clean up any internal data structures using this obj. 
5373} 5374// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result 5375// If this is a secondary command buffer, then make sure its primary is also in-flight 5376// If primary is not in-flight, then remove secondary from global in-flight set 5377// This function is only valid at a point when cmdBuffer is being reset or freed 5378static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) { 5379 bool skip_call = false; 5380 if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) { 5381 // Primary CB or secondary where primary is also in-flight is an error 5382 if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) || 5383 (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) { 5384 skip_call |= log_msg( 5385 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5386 reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 5387 "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action, 5388 reinterpret_cast<const uint64_t &>(cb_node->commandBuffer)); 5389 } 5390 } 5391 return skip_call; 5392} 5393 5394// Iterate over all cmdBuffers in given commandPool and verify that each is not in use 5395static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) { 5396 bool skip_call = false; 5397 for (auto cmd_buffer : pPool->commandBuffers) { 5398 if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) { 5399 skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action); 5400 } 5401 } 5402 return skip_call; 5403} 5404 5405static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) { 5406 for (auto cmd_buffer : pPool->commandBuffers) { 5407 dev_data->globalInFlightCmdBuffers.erase(cmd_buffer); 5408 } 5409} 5410 5411VKAPI_ATTR void VKAPI_CALL 5412FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) { 5413 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5414 bool skip_call = false; 5415 std::unique_lock<std::mutex> lock(global_lock); 5416 5417 for (uint32_t i = 0; i < commandBufferCount; i++) { 5418 auto cb_node = getCBNode(dev_data, pCommandBuffers[i]); 5419 // Verify that command buffer is not in use before it is freed 5420 if (cb_node) { 5421 skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free"); 5422 } 5423 } 5424 lock.unlock(); 5425 5426 if (skip_call) 5427 return; 5428 5429 dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers); 5430 5431 lock.lock(); 5432 auto pPool = getCommandPoolNode(dev_data, commandPool); 5433 for (uint32_t i = 0; i < commandBufferCount; i++) { 5434 auto cb_node = getCBNode(dev_data, pCommandBuffers[i]); 5435 // Delete CB information structure, and remove from commandBufferMap 5436 if (cb_node) { 5437 dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer); 5438 // reset prior to delete for data clean-up 5439 resetCB(dev_data, cb_node->commandBuffer); 5440 dev_data->commandBufferMap.erase(cb_node->commandBuffer); 5441 delete cb_node; 5442 } 5443 5444 // Remove commandBuffer reference from commandPoolMap 5445 pPool->commandBuffers.remove(pCommandBuffers[i]); 5446 } 5447 printCBList(dev_data); 5448 lock.unlock(); 5449}
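// A minimal application-side sketch (hypothetical handle names, not part of this layer) of the
// ordering the in-flight checks above assume: submitted work referencing a command buffer must
// have retired before the buffer is freed or its pool is reset/destroyed, e.g.
//
//   vkQueueSubmit(queue, 1, &submit_info, fence);
//   vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);   // retire the submission first
//   vkFreeCommandBuffers(device, command_pool, 1, &cmd_buf);   // now passes checkCommandBufferInFlight()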
5450 5451VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, 5452 const VkAllocationCallbacks *pAllocator, 5453 VkCommandPool *pCommandPool) { 5454 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5455 5456 VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool); 5457 5458 if (VK_SUCCESS == result) { 5459 std::lock_guard<std::mutex> lock(global_lock); 5460 dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags; 5461 dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex; 5462 } 5463 return result; 5464} 5465 5466VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, 5467 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) { 5468 5469 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5470 VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool); 5471 if (result == VK_SUCCESS) { 5472 std::lock_guard<std::mutex> lock(global_lock); 5473 dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo; 5474 } 5475 return result; 5476} 5477 5478// Destroy commandPool along with all of the commandBuffers allocated from that pool 5479VKAPI_ATTR void VKAPI_CALL 5480DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { 5481 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5482 bool skip_call = false; 5483 std::unique_lock<std::mutex> lock(global_lock); 5484 // Verify that command buffers in pool are complete (not in-flight) 5485 auto pPool = getCommandPoolNode(dev_data, commandPool); 5486 skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with"); 5487 5488 lock.unlock(); 5489 5490 if (skip_call) 5491 return; 5492 5493 dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator); 5494 5495 lock.lock(); 5496 // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap 5497 clearCommandBuffersInFlight(dev_data, pPool); 5498 for (auto cb : pPool->commandBuffers) { 5499 clear_cmd_buf_and_mem_references(dev_data, cb); 5500 auto cb_node = getCBNode(dev_data, cb); 5501 dev_data->commandBufferMap.erase(cb); // Remove this command buffer 5502 delete cb_node; // delete CB info structure 5503 } 5504 dev_data->commandPoolMap.erase(commandPool); 5505 lock.unlock(); 5506} 5507 5508VKAPI_ATTR VkResult VKAPI_CALL 5509ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) { 5510 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5511 bool skip_call = false; 5512 5513 std::unique_lock<std::mutex> lock(global_lock); 5514 auto pPool = getCommandPoolNode(dev_data, commandPool); 5515 skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with"); 5516 lock.unlock(); 5517 5518 if (skip_call) 5519 return VK_ERROR_VALIDATION_FAILED_EXT; 5520 5521 VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags); 5522 5523 // Reset all of the CBs allocated from this pool 5524 if (VK_SUCCESS == result) { 5525 lock.lock(); 5526 clearCommandBuffersInFlight(dev_data, pPool); 5527 for (auto cmdBuffer : pPool->commandBuffers) { 5528 resetCB(dev_data, cmdBuffer); 5529 } 5530 
lock.unlock(); 5531 } 5532 return result; 5533} 5534 5535VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) { 5536 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5537 bool skip_call = false; 5538 std::unique_lock<std::mutex> lock(global_lock); 5539 for (uint32_t i = 0; i < fenceCount; ++i) { 5540 auto pFence = getFenceNode(dev_data, pFences[i]); 5541 if (pFence && pFence->state == FENCE_INFLIGHT) { 5542 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5543 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", 5544 "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i])); 5545 } 5546 } 5547 lock.unlock(); 5548 5549 if (skip_call) 5550 return VK_ERROR_VALIDATION_FAILED_EXT; 5551 5552 VkResult result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences); 5553 5554 if (result == VK_SUCCESS) { 5555 lock.lock(); 5556 for (uint32_t i = 0; i < fenceCount; ++i) { 5557 auto pFence = getFenceNode(dev_data, pFences[i]); 5558 if (pFence) { 5559 pFence->state = FENCE_UNSIGNALED; 5560 // TODO: these should really have already been enforced on 5561 // INFLIGHT->RETIRED transition. 5562 pFence->queues.clear(); 5563 pFence->priorFences.clear(); 5564 } 5565 } 5566 lock.unlock(); 5567 } 5568 5569 return result; 5570} 5571 5572// For given cb_nodes, invalidate them and track object causing invalidation 5573void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) { 5574 for (auto cb_node : cb_nodes) { 5575 cb_node->state = CB_INVALID; 5576 cb_node->broken_bindings.push_back(obj); 5577 } 5578} 5579 5580VKAPI_ATTR void VKAPI_CALL 5581DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) { 5582 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5583 std::unique_lock<std::mutex> lock(global_lock); 5584 auto fb_node = getFramebuffer(dev_data, framebuffer); 5585 if (fb_node) { 5586 invalidateCommandBuffers(fb_node->cb_bindings, 5587 {reinterpret_cast<uint64_t &>(fb_node->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT}); 5588 dev_data->frameBufferMap.erase(fb_node->framebuffer); 5589 } 5590 lock.unlock(); 5591 dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator); 5592} 5593 5594VKAPI_ATTR void VKAPI_CALL 5595DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { 5596 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5597 dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator); 5598 std::lock_guard<std::mutex> lock(global_lock); 5599 dev_data->renderPassMap.erase(renderPass); 5600 // TODO: leaking all the guts of the renderpass node here! 
5601} 5602 5603VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, 5604 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) { 5605 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5606 5607 VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer); 5608 5609 if (VK_SUCCESS == result) { 5610 std::lock_guard<std::mutex> lock(global_lock); 5611 // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid 5612 dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo)))); 5613 } 5614 return result; 5615} 5616 5617static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) { 5618 bool skip_call = false; 5619 BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer); 5620 // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time 5621 if (0 == (static_cast<uint32_t>(buf_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) { 5622 if (buf_node->mem) { 5623 DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, buf_node->mem); 5624 if (!pMemObjInfo) { 5625 skip_call |= 5626 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5627 reinterpret_cast<uint64_t &>(buf_node->mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM", 5628 "vkCreateBufferView called with invalid memory 0x%" PRIx64 " bound to buffer 0x%" PRIx64 ". Memory " 5629 "must be bound prior to creating a view to a non-sparse buffer.", 5630 reinterpret_cast<uint64_t &>(buf_node->mem), reinterpret_cast<const uint64_t &>(pCreateInfo->buffer)); 5631 } 5632 } else { 5633 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5634 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM", 5635 "vkCreateBufferView called with invalid memory bound to buffer 0x%" PRIx64 ". 
Memory " 5636 "must be bound prior to creating a view to a non-sparse buffer.", 5637 reinterpret_cast<const uint64_t &>(pCreateInfo->buffer)); 5638 } 5639 } 5640 // In order to create a valid buffer view, the buffer must have been created with at least one of the 5641 // following flags: UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT 5642 validateBufferUsageFlags(dev_data, buf_node, 5643 VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false, 5644 "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT"); 5645 return skip_call; 5646} 5647 5648VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo, 5649 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) { 5650 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5651 std::unique_lock<std::mutex> lock(global_lock); 5652 bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo); 5653 lock.unlock(); 5654 if (skip_call) 5655 return VK_ERROR_VALIDATION_FAILED_EXT; 5656 VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView); 5657 if (VK_SUCCESS == result) { 5658 lock.lock(); 5659 dev_data->bufferViewMap[*pView] = unique_ptr<VkBufferViewCreateInfo>(new VkBufferViewCreateInfo(*pCreateInfo)); 5660 lock.unlock(); 5661 } 5662 return result; 5663} 5664 5665VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, 5666 const VkAllocationCallbacks *pAllocator, VkImage *pImage) { 5667 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5668 5669 VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage); 5670 5671 if (VK_SUCCESS == result) { 5672 std::lock_guard<std::mutex> lock(global_lock); 5673 IMAGE_LAYOUT_NODE image_node; 5674 image_node.layout = pCreateInfo->initialLayout; 5675 image_node.format = pCreateInfo->format; 5676 dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(*pImage, pCreateInfo)))); 5677 ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()}; 5678 dev_data->imageSubresourceMap[*pImage].push_back(subpair); 5679 dev_data->imageLayoutMap[subpair] = image_node; 5680 } 5681 return result; 5682} 5683 5684static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) { 5685 /* expects global_lock to be held by caller */ 5686 5687 auto image_node = getImageNode(dev_data, image); 5688 if (image_node) { 5689 /* If the caller used the special values VK_REMAINING_MIP_LEVELS and 5690 * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to 5691 * the actual values. 5692 */ 5693 if (range->levelCount == VK_REMAINING_MIP_LEVELS) { 5694 range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel; 5695 } 5696 5697 if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) { 5698 range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer; 5699 } 5700 } 5701} 5702 5703// Return the correct layer/level counts if the caller used the special 5704// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS. 
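// Worked example of the resolution below: for an image created with mipLevels = 10 and
// arrayLayers = 6, a range of { baseMipLevel = 2, levelCount = VK_REMAINING_MIP_LEVELS,
// baseArrayLayer = 1, layerCount = VK_REMAINING_ARRAY_LAYERS } resolves to
// levelCount = 10 - 2 = 8 and layerCount = 6 - 1 = 5.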
5705static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range, 5706 VkImage image) { 5707 /* expects global_lock to be held by caller */ 5708 5709 *levels = range.levelCount; 5710 *layers = range.layerCount; 5711 auto image_node = getImageNode(dev_data, image); 5712 if (image_node) { 5713 if (range.levelCount == VK_REMAINING_MIP_LEVELS) { 5714 *levels = image_node->createInfo.mipLevels - range.baseMipLevel; 5715 } 5716 if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) { 5717 *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer; 5718 } 5719 } 5720} 5721 5722static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo) { 5723 bool skip_call = false; 5724 IMAGE_NODE *image_node = getImageNode(dev_data, pCreateInfo->image); 5725 skip_call |= validateImageUsageFlags(dev_data, image_node, 5726 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | 5727 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 5728 false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT"); 5729 // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time 5730 if (0 == (static_cast<uint32_t>(image_node->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) { 5731 if (MEMTRACKER_SWAP_CHAIN_IMAGE_KEY != image_node->mem) { 5732 if (image_node->mem) { 5733 DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, image_node->mem); 5734 if (!pMemObjInfo) { 5735 skip_call |= log_msg( 5736 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5737 reinterpret_cast<uint64_t &>(image_node->mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM", 5738 "vkCreateImageView called with invalid memory 0x%" PRIx64 " bound to image 0x%" PRIx64 ". Memory " 5739 "must be bound prior to creating a view to a non-sparse image.", 5740 reinterpret_cast<uint64_t &>(image_node->mem), reinterpret_cast<const uint64_t &>(pCreateInfo->image)); 5741 } 5742 } else { 5743 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5744 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM", 5745 "vkCreateImageView called with invalid memory bound to image 0x%" PRIx64 ". 
Memory " 5746 "must be bound prior to creating a view to a non-sparse image.", 5747 reinterpret_cast<const uint64_t &>(pCreateInfo->image)); 5748 } 5749 } 5750 } 5751 return skip_call; 5752} 5753 5754static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo, VkImageView *pView) { 5755 dev_data->imageViewMap[*pView] = unique_ptr<VkImageViewCreateInfo>(new VkImageViewCreateInfo(*pCreateInfo)); 5756 ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[*pView].get()->subresourceRange, pCreateInfo->image); 5757} 5758 5759VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo, 5760 const VkAllocationCallbacks *pAllocator, VkImageView *pView) { 5761 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5762 std::unique_lock<std::mutex> lock(global_lock); 5763 bool skip_call = PreCallValidateCreateImageView(dev_data, pCreateInfo); 5764 lock.unlock(); 5765 if (skip_call) 5766 return VK_ERROR_VALIDATION_FAILED_EXT; 5767 VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView); 5768 if (VK_SUCCESS == result) { 5769 lock.lock(); 5770 PostCallRecordCreateImageView(dev_data, pCreateInfo, pView); 5771 lock.unlock(); 5772 } 5773 5774 return result; 5775} 5776 5777VKAPI_ATTR VkResult VKAPI_CALL 5778CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) { 5779 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5780 VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence); 5781 if (VK_SUCCESS == result) { 5782 std::lock_guard<std::mutex> lock(global_lock); 5783 auto &fence_node = dev_data->fenceMap[*pFence]; 5784 fence_node.fence = *pFence; 5785 fence_node.createInfo = *pCreateInfo; 5786 fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? 
FENCE_RETIRED : FENCE_UNSIGNALED; 5787 } 5788 return result; 5789} 5790 5791// TODO handle pipeline caches 5792VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, 5793 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) { 5794 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5795 VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache); 5796 return result; 5797} 5798 5799VKAPI_ATTR void VKAPI_CALL 5800DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) { 5801 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5802 dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator); 5803} 5804 5805VKAPI_ATTR VkResult VKAPI_CALL 5806GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) { 5807 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5808 VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData); 5809 return result; 5810} 5811 5812VKAPI_ATTR VkResult VKAPI_CALL 5813MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) { 5814 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5815 VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches); 5816 return result; 5817} 5818 5819// utility function to set collective state for pipeline 5820void set_pipeline_state(PIPELINE_NODE *pPipe) { 5821 // If any attachment used by this pipeline has blendEnable, set top-level blendEnable 5822 if (pPipe->graphicsPipelineCI.pColorBlendState) { 5823 for (size_t i = 0; i < pPipe->attachments.size(); ++i) { 5824 if (VK_TRUE == pPipe->attachments[i].blendEnable) { 5825 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 5826 (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 5827 ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 5828 (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 5829 ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 5830 (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 5831 ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 5832 (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) { 5833 pPipe->blendConstantsEnabled = true; 5834 } 5835 } 5836 } 5837 } 5838} 5839 5840VKAPI_ATTR VkResult VKAPI_CALL 5841CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, 5842 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, 5843 VkPipeline *pPipelines) { 5844 VkResult result = VK_SUCCESS; 5845 // TODO What to do with pipelineCache? 5846 // The order of operations here is a little convoluted but gets the job done 5847 // 1. Pipeline create state is first shadowed into PIPELINE_NODE struct 5848 // 2. Create state is then validated (which uses flags setup during shadowing) 5849 // 3. 
If everything looks good, we'll then create the pipeline and add NODE to pipelineMap 5850 bool skip_call = false; 5851 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic 5852 vector<PIPELINE_NODE *> pPipeNode(count); 5853 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5854 5855 uint32_t i = 0; 5856 std::unique_lock<std::mutex> lock(global_lock); 5857 5858 for (i = 0; i < count; i++) { 5859 pPipeNode[i] = new PIPELINE_NODE; 5860 pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]); 5861 pPipeNode[i]->render_pass_ci.initialize(getRenderPass(dev_data, pCreateInfos[i].renderPass)->pCreateInfo); 5862 pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout); 5863 5864 skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode, i); 5865 } 5866 5867 if (!skip_call) { 5868 lock.unlock(); 5869 result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, 5870 pPipelines); 5871 lock.lock(); 5872 for (i = 0; i < count; i++) { 5873 pPipeNode[i]->pipeline = pPipelines[i]; 5874 dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i]; 5875 } 5876 lock.unlock(); 5877 } else { 5878 for (i = 0; i < count; i++) { 5879 delete pPipeNode[i]; 5880 } 5881 lock.unlock(); 5882 return VK_ERROR_VALIDATION_FAILED_EXT; 5883 } 5884 return result; 5885} 5886 5887VKAPI_ATTR VkResult VKAPI_CALL 5888CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, 5889 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, 5890 VkPipeline *pPipelines) { 5891 VkResult result = VK_SUCCESS; 5892 bool skip_call = false; 5893 5894 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic 5895 vector<PIPELINE_NODE *> pPipeNode(count); 5896 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5897 5898 uint32_t i = 0; 5899 std::unique_lock<std::mutex> lock(global_lock); 5900 for (i = 0; i < count; i++) { 5901 // TODO: Verify compute stage bits 5902 5903 // Create and initialize internal tracking data structure 5904 pPipeNode[i] = new PIPELINE_NODE; 5905 pPipeNode[i]->initComputePipeline(&pCreateInfos[i]); 5906 pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout); 5907 // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo)); 5908 5909 // TODO: Add Compute Pipeline Verification 5910 skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i], &dev_data->phys_dev_properties.features, 5911 dev_data->shaderModuleMap); 5912 // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]); 5913 } 5914 5915 if (!skip_call) { 5916 lock.unlock(); 5917 result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, 5918 pPipelines); 5919 lock.lock(); 5920 for (i = 0; i < count; i++) { 5921 pPipeNode[i]->pipeline = pPipelines[i]; 5922 dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i]; 5923 } 5924 lock.unlock(); 5925 } else { 5926 for (i = 0; i < count; i++) { 5927 // Clean up any locally allocated data structures 5928 delete pPipeNode[i]; 5929 } 5930 lock.unlock(); 5931 return VK_ERROR_VALIDATION_FAILED_EXT; 5932 } 5933 return result; 5934} 5935 5936VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, 5937 const VkAllocationCallbacks *pAllocator, 
VkSampler *pSampler) { 5938 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5939 VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler); 5940 if (VK_SUCCESS == result) { 5941 std::lock_guard<std::mutex> lock(global_lock); 5942 dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo)); 5943 } 5944 return result; 5945} 5946 5947VKAPI_ATTR VkResult VKAPI_CALL 5948CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, 5949 const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) { 5950 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5951 VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout); 5952 if (VK_SUCCESS == result) { 5953 // TODOSC : Capture layout bindings set 5954 std::lock_guard<std::mutex> lock(global_lock); 5955 dev_data->descriptorSetLayoutMap[*pSetLayout] = 5956 new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout); 5957 } 5958 return result; 5959} 5960 5961// Used by CreatePipelineLayout and CmdPushConstants. 5962// Note that the index argument is optional and only used by CreatePipelineLayout. 5963static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size, 5964 const char *caller_name, uint32_t index = 0) { 5965 uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize; 5966 bool skip_call = false; 5967 // Check that offset + size don't exceed the max. 5968 // Prevent arithmetic overflow here by avoiding addition and testing in this order. 5969 if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) { 5970 // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem. 5971 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { 5972 skip_call |= 5973 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 5974 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that " 5975 "exceeds this device's maxPushConstantsSize of %u.", 5976 caller_name, index, offset, size, maxPushConstantsSize); 5977 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { 5978 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 5979 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that " 5980 "exceeds this device's maxPushConstantsSize of %u.", 5981 caller_name, offset, size, maxPushConstantsSize); 5982 } else { 5983 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 5984 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name); 5985 } 5986 } 5987 // size needs to be non-zero and a multiple of 4. 5988 if ((size == 0) || ((size & 0x3) != 0)) { 5989 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { 5990 skip_call |= 5991 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 5992 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with " 5993 "size %u. 
Size must be greater than zero and a multiple of 4.", 5994 caller_name, index, size); 5995 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { 5996 skip_call |= 5997 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 5998 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with " 5999 "size %u. Size must be greater than zero and a multiple of 4.", 6000 caller_name, size); 6001 } else { 6002 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6003 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name); 6004 } 6005 } 6006 // offset needs to be a multiple of 4. 6007 if ((offset & 0x3) != 0) { 6008 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) { 6009 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6010 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with " 6011 "offset %u. Offset must be a multiple of 4.", 6012 caller_name, index, offset); 6013 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) { 6014 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6015 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with " 6016 "offset %u. Offset must be a multiple of 4.", 6017 caller_name, offset); 6018 } else { 6019 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6020 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name); 6021 } 6022 } 6023 return skip_call; 6024} 6025 6026VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, 6027 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) { 6028 bool skip_call = false; 6029 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6030 // Push Constant Range checks 6031 uint32_t i = 0; 6032 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 6033 skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset, 6034 pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i); 6035 if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) { 6036 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6037 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set."); 6038 } 6039 } 6040 // Each range has been validated. Now check for overlap between ranges (if they are good). 
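// Worked example of the half-open interval test below: ranges [0, 16) and [8, 24) overlap
// because (minA <= minB && maxA > minB) evaluates as (0 <= 8 && 16 > 8), which is true;
// adjacent ranges [0, 16) and [16, 32) trigger neither clause and are accepted.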
6041 if (!skip_call) { 6042 uint32_t i, j; 6043 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 6044 for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) { 6045 const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset; 6046 const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size; 6047 const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset; 6048 const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size; 6049 if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) { 6050 skip_call |= 6051 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6052 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with " 6053 "overlapping ranges: %u:[%u, %u), %u:[%u, %u)", 6054 i, minA, maxA, j, minB, maxB); 6055 } 6056 } 6057 } 6058 } 6059 6060 if (skip_call) 6061 return VK_ERROR_VALIDATION_FAILED_EXT; 6062 6063 VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout); 6064 if (VK_SUCCESS == result) { 6065 std::lock_guard<std::mutex> lock(global_lock); 6066 PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout]; 6067 plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount); 6068 plNode.setLayouts.resize(pCreateInfo->setLayoutCount); 6069 for (i = 0; i < pCreateInfo->setLayoutCount; ++i) { 6070 plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i]; 6071 plNode.setLayouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]); 6072 } 6073 plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount); 6074 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 6075 plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i]; 6076 } 6077 } 6078 return result; 6079} 6080 6081VKAPI_ATTR VkResult VKAPI_CALL 6082CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, 6083 VkDescriptorPool *pDescriptorPool) { 6084 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6085 VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool); 6086 if (VK_SUCCESS == result) { 6087 // Track the new pool in the global descriptorPoolMap 6088 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 6089 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64, 6090 (uint64_t)*pDescriptorPool)) 6091 return VK_ERROR_VALIDATION_FAILED_EXT; 6092 DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo); 6093 if (NULL == pNewNode) { 6094 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 6095 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", 6096 "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()")) 6097 return VK_ERROR_VALIDATION_FAILED_EXT; 6098 } else { 6099 std::lock_guard<std::mutex> lock(global_lock); 6100 dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode; 6101 } 6102 } else { 6103 // Do we need to do anything if pool creation fails?
6104 } 6105 return result; 6106} 6107 6108VKAPI_ATTR VkResult VKAPI_CALL 6109ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) { 6110 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6111 VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags); 6112 if (VK_SUCCESS == result) { 6113 std::lock_guard<std::mutex> lock(global_lock); 6114 clearDescriptorPool(dev_data, device, descriptorPool, flags); 6115 } 6116 return result; 6117} 6118// Ensure the pool contains enough descriptors and descriptor sets to satisfy 6119// an allocation request. Fills common_data with the total number of descriptors of each type required, 6120// as well as DescriptorSetLayout ptrs used for later update. 6121static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo, 6122 cvdescriptorset::AllocateDescriptorSetsData *common_data) { 6123 // All state checks for AllocateDescriptorSets is done in single function 6124 return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data); 6125} 6126// Allocation state was good and call down chain was made so update state based on allocating descriptor sets 6127static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo, 6128 VkDescriptorSet *pDescriptorSets, 6129 const cvdescriptorset::AllocateDescriptorSetsData *common_data) { 6130 // All the updates are contained in a single cvdescriptorset function 6131 cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap, 6132 &dev_data->setMap, dev_data); 6133} 6134 6135VKAPI_ATTR VkResult VKAPI_CALL 6136AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) { 6137 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6138 std::unique_lock<std::mutex> lock(global_lock); 6139 cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount); 6140 bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data); 6141 lock.unlock(); 6142 6143 if (skip_call) 6144 return VK_ERROR_VALIDATION_FAILED_EXT; 6145 6146 VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets); 6147 6148 if (VK_SUCCESS == result) { 6149 lock.lock(); 6150 PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data); 6151 lock.unlock(); 6152 } 6153 return result; 6154} 6155// Verify state before freeing DescriptorSets 6156static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count, 6157 const VkDescriptorSet *descriptor_sets) { 6158 bool skip_call = false; 6159 // First make sure sets being destroyed are not currently in-use 6160 for (uint32_t i = 0; i < count; ++i) 6161 skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets"); 6162 6163 DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool); 6164 if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) { 6165 // Can't Free from a NON_FREE pool 6166 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 6167 
reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS", 6168 "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " 6169 "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT."); 6170 } 6171 return skip_call; 6172} 6173// Sets have been removed from the pool so update underlying state 6174static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count, 6175 const VkDescriptorSet *descriptor_sets) { 6176 DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool); 6177 // Update available descriptor sets in pool 6178 pool_state->availableSets += count; 6179 6180 // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap 6181 for (uint32_t i = 0; i < count; ++i) { 6182 auto set_state = dev_data->setMap[descriptor_sets[i]]; 6183 uint32_t type_index = 0, descriptor_count = 0; 6184 for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) { 6185 type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j)); 6186 descriptor_count = set_state->GetDescriptorCountFromIndex(j); 6187 pool_state->availableDescriptorTypeCount[type_index] += descriptor_count; 6188 } 6189 freeDescriptorSet(dev_data, set_state); 6190 pool_state->sets.erase(set_state); 6191 } 6192} 6193 6194VKAPI_ATTR VkResult VKAPI_CALL 6195FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) { 6196 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6197 // Make sure that no sets being destroyed are in-flight 6198 std::unique_lock<std::mutex> lock(global_lock); 6199 bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets); 6200 lock.unlock(); 6201 if (skip_call) 6202 return VK_ERROR_VALIDATION_FAILED_EXT; 6203 VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets); 6204 if (VK_SUCCESS == result) { 6205 lock.lock(); 6206 PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets); 6207 lock.unlock(); 6208 } 6209 return result; 6210} 6211// TODO : This is a Proof-of-concept for core validation architecture 6212// Really we'll want to break out these functions to separate files but 6213// keeping it all together here to prove out design 6214// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets() 6215static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount, 6216 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, 6217 const VkCopyDescriptorSet *pDescriptorCopies) { 6218 // First thing to do is perform map look-ups. 6219 // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets 6220 // so we can't just do a single map look-up up-front, but do them individually in functions below 6221 6222 // Now make call(s) that validate state, but don't perform state updates in this function 6223 // Note, here DescriptorSets is unique in that we don't yet have an instance. 
Using a helper function in the 6224 // namespace which will parse params and make calls into specific class instances 6225 return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites, 6226 descriptorCopyCount, pDescriptorCopies); 6227} 6228// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets() 6229static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount, 6230 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount, 6231 const VkCopyDescriptorSet *pDescriptorCopies) { 6232 cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 6233 pDescriptorCopies); 6234} 6235 6236VKAPI_ATTR void VKAPI_CALL 6237UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, 6238 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { 6239 // Only map look-up at top level is for device-level layer_data 6240 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6241 std::unique_lock<std::mutex> lock(global_lock); 6242 bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 6243 pDescriptorCopies); 6244 lock.unlock(); 6245 if (!skip_call) { 6246 dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 6247 pDescriptorCopies); 6248 lock.lock(); 6249 // Since UpdateDescriptorSets() is void, nothing to check prior to updating state 6250 PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 6251 pDescriptorCopies); 6252 } 6253} 6254 6255VKAPI_ATTR VkResult VKAPI_CALL 6256AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) { 6257 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6258 VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer); 6259 if (VK_SUCCESS == result) { 6260 std::unique_lock<std::mutex> lock(global_lock); 6261 auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool); 6262 6263 if (pPool) { 6264 for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) { 6265 // Add command buffer to its commandPool map 6266 pPool->commandBuffers.push_back(pCommandBuffer[i]); 6267 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE; 6268 // Add command buffer to map 6269 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB; 6270 resetCB(dev_data, pCommandBuffer[i]); 6271 pCB->createInfo = *pCreateInfo; 6272 pCB->device = device; 6273 } 6274 } 6275 printCBList(dev_data); 6276 lock.unlock(); 6277 } 6278 return result; 6279} 6280 6281VKAPI_ATTR VkResult VKAPI_CALL 6282BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { 6283 bool skip_call = false; 6284 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6285 std::unique_lock<std::mutex> lock(global_lock); 6286 // Validate command buffer level 6287 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6288 if (pCB) { 6289 // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references 6290 if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) { 6291 skip_call 
|= 6292 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6293 (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM", 6294 "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. " 6295 "You must check CB fence before this call.", 6296 commandBuffer); 6297 } 6298 clear_cmd_buf_and_mem_references(dev_data, pCB); 6299 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { 6300 // Secondary Command Buffer 6301 const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; 6302 if (!pInfo) { 6303 skip_call |= 6304 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6305 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6306 "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.", 6307 reinterpret_cast<void *>(commandBuffer)); 6308 } else { 6309 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { 6310 if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB 6311 skip_call |= log_msg( 6312 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6313 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6314 "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.", 6315 reinterpret_cast<void *>(commandBuffer)); 6316 } 6317 if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf 6318 skip_call |= log_msg( 6319 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6320 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6321 "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a " 6322 "valid framebuffer parameter is specified.", 6323 reinterpret_cast<void *>(commandBuffer)); 6324 } else { 6325 string errorString = ""; 6326 auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer); 6327 if (framebuffer) { 6328 if ((framebuffer->createInfo.renderPass != pInfo->renderPass) && 6329 !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(), 6330 getRenderPass(dev_data, pInfo->renderPass)->pCreateInfo, 6331 errorString)) { 6332 // renderPass that framebuffer was created with must be compatible with local renderPass 6333 skip_call |= log_msg( 6334 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6335 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer), 6336 __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS", 6337 "vkBeginCommandBuffer(): Secondary Command " 6338 "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer " 6339 "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s", 6340 reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass), 6341 reinterpret_cast<const uint64_t &>(pInfo->framebuffer), 6342 reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str()); 6343 } 6344 // Connect this framebuffer to this cmdBuffer 6345 framebuffer->cb_bindings.insert(pCB); 6346 } 6347 } 6348 } 6349 if ((pInfo->occlusionQueryEnable == VK_FALSE || 6350 dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) && 6351 (pInfo->queryFlags & 
VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
        }
        skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        for (auto query : pCB->activeQueries) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
                                 (uint64_t)(query.pool), query.index);
        }
    }
    if (!skip_call) {
        lock.unlock();
        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
        lock.lock();
        if (VK_SUCCESS == result && pCB) { // Guard against the unknown-CB case, where pCB is null
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
            printCB(dev_data, commandBuffer);
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = getCommandPoolNode(dev_data, cmdPool);
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
    }
    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}
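// Both the explicit reset above and the implicit reset in BeginCommandBuffer() require the pool to have
// been created resettable (app-side sketch; queue_family_index is illustrative):
//
//   VkCommandPoolCreateInfo pool_ci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO};
//   pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//   pool_ci.queueFamilyIndex = queue_family_index;
//   vkCreateCommandPool(device, &pool_ci, nullptr, &command_pool);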
VKAPI_ATTR void VKAPI_CALL
CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
        }

        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
        if (pPN) {
            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
            set_cb_pso_status(pCB, pPN);
            set_pipeline_state(pPN);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
                                 "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        pCB->status |= CBSTATUS_VIEWPORT_SET;
        pCB->viewports.resize(viewportCount);
        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        pCB->status |= CBSTATUS_SCISSOR_SET;
        pCB->scissors.resize(scissorCount);
        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}
VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
                                 "flag. This is undefined behavior and could be ignored.");
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
                                                         depthBiasSlopeFactor);
}

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}
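// Each CmdSet* hook above follows one pattern: record the command, then set a CBSTATUS_* bit so that
// draw-time validation can verify every dynamic state declared by the bound pipeline was actually set.
// Illustrative sketch of the draw-time side of that contract (simplified, not the exact check):
//
//   if (pipeline_declares(VK_DYNAMIC_STATE_SCISSOR) && !(pCB->status & CBSTATUS_SCISSOR_SET))
//       skip_call |= log_msg(..., "Dynamic scissor state was never set for this command buffer");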
VKAPI_ATTR void VKAPI_CALL
CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
}
VKAPI_ATTR void VKAPI_CALL
CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                      const uint32_t *pDynamicOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
            }
            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            for (uint32_t i = 0; i < setCount; i++) {
                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
                if (pSet) {
                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
                    pSet->BindCommandBuffer(pCB);
                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                             "DS 0x%" PRIxLEAST64
                                             " bound but it was never updated. You may want to either update it or not bind it.",
                                             (uint64_t)pDescriptorSets[i]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
                    }

                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();

                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();

                    if (setDynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (0x%" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else { // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                }
                            }

                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += setDynamicDescriptorCount;
                        }
                    }
                } else {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
                                         (uint64_t)pDescriptorSets[i]);
                }
                skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) { // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                             layout, i, errorString)) {
                            skip_call |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet 0x%" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, layout, lastSetIndex, errorString)) {
                        auto old_set = oldFinalBoundSet->GetSet();
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
                                    " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                    " newly bound as set #%u so set #%u and any subsequent sets were "
                                    "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
            // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                            "is %u. It should exactly match the number of dynamic descriptors.",
                            setCount, totalDynamicDescriptors, dynamicOffsetCount);
            }
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
VKAPI_ATTR void VKAPI_CALL
CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);
    auto buff_node = getBufferNode(dev_data, buffer);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node && buff_node) {
        std::function<bool()> function = [=]() {
            return validate_memory_is_valid(dev_data, buff_node->mem, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                 offset, string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                uint32_t bindingCount, const VkBuffer *pBuffers,
                                                const VkDeviceSize *pOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buff_node = getBufferNode(dev_data, pBuffers[i]);
            assert(buff_node);

            std::function<bool()> function = [=]() {
                return validate_memory_is_valid(dev_data, buff_node->mem, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
        }
        skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

/* expects global_lock to be held by caller */
static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;

    for (auto imageView : pCB->updateImages) {
        auto iv_data = getImageViewData(dev_data, imageView);
        if (!iv_data)
            continue;

        auto img_node = getImageNode(dev_data, iv_data->image);
        assert(img_node);
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, img_node->mem, true, iv_data->image);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buff_node = getBufferNode(dev_data, buffer);
        assert(buff_node);
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, buff_node->mem, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        pCB->drawCount[DRAW]++;
        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                          uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
        pCB->drawCount[DRAW_INDEXED]++;
        skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                             "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
                                                        firstInstance);
}
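// Note on the validate_functions lambdas queued by the hooks above: they do not execute at record time.
// They are stored on the command buffer and replayed when it is submitted, so "source memory must be
// valid / destination memory becomes valid" is evaluated against the state the GPU will actually see.
// Shape of that replay (illustrative sketch, not the exact submit-time code):
//
//   for (auto &fn : cb_node->validate_functions)
//       skip_call |= fn();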
VKAPI_ATTR void VKAPI_CALL
CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, buff_node, "vkCmdDrawIndirect");
        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
        cb_node->drawCount[DRAW_INDIRECT]++;
        skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                             "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}

VKAPI_ATTR void VKAPI_CALL
CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, buff_node, "vkCmdDrawIndexedIndirect");
        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
        cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
        skip_call |= validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}
VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
}

VKAPI_ATTR void VKAPI_CALL
CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) { // Also require a valid buffer node before binding it below
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, buff_node, "vkCmdDispatchIndirect");
        skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE);
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && src_buff_node && dst_buff_node) {
        // Update bindings between buffers and cmd buffer
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node, "vkCmdCopyBuffer");
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdCopyBuffer");
        // Validate that SRC & DST buffers have correct usage flags set
        skip_call |= validateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()",
                                              "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= validateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()",
                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");

        std::function<bool()> function = [=]() {
            return validate_memory_is_valid(dev_data, src_buff_node->mem, "vkCmdCopyBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            set_memory_valid(dev_data, dst_buff_node->mem, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer");
    } else {
        // Param_checker will flag errors on invalid objects, just assert here as debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
"vkCmdCopyBuffer()", 7117 "VK_BUFFER_USAGE_TRANSFER_SRC_BIT"); 7118 skip_call |= validateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()", 7119 "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 7120 7121 std::function<bool()> function = [=]() { 7122 return validate_memory_is_valid(dev_data, src_buff_node->mem, "vkCmdCopyBuffer()"); 7123 }; 7124 cb_node->validate_functions.push_back(function); 7125 function = [=]() { 7126 set_memory_valid(dev_data, dst_buff_node->mem, true); 7127 return false; 7128 }; 7129 cb_node->validate_functions.push_back(function); 7130 7131 skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()"); 7132 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer"); 7133 } else { 7134 // Param_checker will flag errors on invalid objects, just assert here as debugging aid 7135 assert(0); 7136 } 7137 lock.unlock(); 7138 if (!skip_call) 7139 dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions); 7140} 7141 7142static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage, 7143 VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) { 7144 bool skip_call = false; 7145 7146 for (uint32_t i = 0; i < subLayers.layerCount; ++i) { 7147 uint32_t layer = i + subLayers.baseArrayLayer; 7148 VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer}; 7149 IMAGE_CMD_BUF_LAYOUT_NODE node; 7150 if (!FindLayout(cb_node, srcImage, sub, node)) { 7151 SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout)); 7152 continue; 7153 } 7154 if (node.layout != srcImageLayout) { 7155 // TODO: Improve log message in the next pass 7156 skip_call |= 7157 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 7158 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s " 7159 "and doesn't match the current layout %s.", 7160 string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout)); 7161 } 7162 } 7163 if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) { 7164 if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) { 7165 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning. 
7166 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 7167 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 7168 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL."); 7169 } else { 7170 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7171 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be " 7172 "TRANSFER_SRC_OPTIMAL or GENERAL.", 7173 string_VkImageLayout(srcImageLayout)); 7174 } 7175 } 7176 return skip_call; 7177} 7178 7179static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage, 7180 VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) { 7181 bool skip_call = false; 7182 7183 for (uint32_t i = 0; i < subLayers.layerCount; ++i) { 7184 uint32_t layer = i + subLayers.baseArrayLayer; 7185 VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer}; 7186 IMAGE_CMD_BUF_LAYOUT_NODE node; 7187 if (!FindLayout(cb_node, destImage, sub, node)) { 7188 SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout)); 7189 continue; 7190 } 7191 if (node.layout != destImageLayout) { 7192 skip_call |= 7193 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 7194 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose dest layout is %s and " 7195 "doesn't match the current layout %s.", 7196 string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout)); 7197 } 7198 } 7199 if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) { 7200 if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) { 7201 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning. 
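// The two Verify*ImageLayout() helpers above expect copy sources/destinations in TRANSFER_*_OPTIMAL
// (GENERAL is accepted with a performance warning). App-side sketch of a barrier that makes an image
// copy-readable (handles and access/stage masks are illustrative):
//
//   VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//   barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
//   barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//   barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   barrier.image = image;
//   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//   barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
//   barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//   vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
//                        0, nullptr, 0, nullptr, 1, &barrier);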
VKAPI_ATTR void VKAPI_CALL
CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        // Update bindings between images and cmd buffer
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, src_img_node, "vkCmdCopyImage");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, dst_img_node, "vkCmdCopyImage");
        // Validate that SRC & DST images have correct usage flags set (note: image, not buffer, usage bits)
        skip_call |= validateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= validateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return validate_memory_is_valid(dev_data, src_img_node->mem, "vkCmdCopyImage()", srcImage);
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            set_memory_valid(dev_data, dst_img_node->mem, true, dstImage);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions);
}

VKAPI_ATTR void VKAPI_CALL
CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        // Update bindings between images and cmd buffer
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, src_img_node, "vkCmdBlitImage");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, dst_img_node, "vkCmdBlitImage");
        // Validate that SRC & DST images have correct usage flags set (note: image, not buffer, usage bits)
        skip_call |= validateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= validateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return validate_memory_is_valid(dev_data, src_img_node->mem, "vkCmdBlitImage()", srcImage);
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            set_memory_valid(dev_data, dst_img_node->mem, true, dstImage);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions, filter);
}
VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                VkImage dstImage, VkImageLayout dstImageLayout,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_buff_node && dst_img_node) {
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node, "vkCmdCopyBufferToImage");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, dst_img_node, "vkCmdCopyBufferToImage");
        skip_call |= validateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= validateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, dst_img_node->mem, true, dstImage);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() { return validate_memory_is_valid(dev_data, src_buff_node->mem, "vkCmdCopyBufferToImage()"); };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}
VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && src_img_node && dst_buff_node) {
        // Update bindings between buffer/image and cmd buffer
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, src_img_node, "vkCmdCopyImageToBuffer");
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdCopyImageToBuffer");
        // Validate that SRC image & DST buffer have correct usage flags set
        skip_call |= validateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= validateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return validate_memory_is_valid(dev_data, src_img_node->mem, "vkCmdCopyImageToBuffer()", srcImage);
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            set_memory_valid(dev_data, dst_buff_node->mem, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && dst_buff_node) {
        // Update bindings between buffer and cmd buffer
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdUpdateBuffer");
        // Validate that DST buffer has correct usage flags set
        skip_call |= validateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, dst_buff_node->mem, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}
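// App-side sketch of the usage-flag contract the transfer hooks above validate: a buffer written by
// transfer commands must be created with TRANSFER_DST usage (fields other than usage are illustrative).
//
//   VkBufferCreateInfo buf_ci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
//   buf_ci.size = size;
//   buf_ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
//   buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//   vkCreateBuffer(device, &buf_ci, nullptr, &buffer);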
|= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()"); 7402 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyUpdateBuffer"); 7403 } else { 7404 assert(0); 7405 } 7406 lock.unlock(); 7407 if (!skip_call) 7408 dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData); 7409} 7410 7411VKAPI_ATTR void VKAPI_CALL 7412CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) { 7413 bool skip_call = false; 7414 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7415 std::unique_lock<std::mutex> lock(global_lock); 7416 7417 auto cb_node = getCBNode(dev_data, commandBuffer); 7418 auto dst_buff_node = getBufferNode(dev_data, dstBuffer); 7419 if (cb_node && dst_buff_node) { 7420 // Update bindings between buffer and cmd buffer 7421 skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdFillBuffer"); 7422 // Validate that DST buffer has correct usage flags set 7423 skip_call |= validateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()", 7424 "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 7425 std::function<bool()> function = [=]() { 7426 set_memory_valid(dev_data, dst_buff_node->mem, true); 7427 return false; 7428 }; 7429 cb_node->validate_functions.push_back(function); 7430 7431 skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()"); 7432 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyFillBuffer"); 7433 } else { 7434 assert(0); 7435 } 7436 lock.unlock(); 7437 if (!skip_call) 7438 dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data); 7439} 7440 7441VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, 7442 const VkClearAttachment *pAttachments, uint32_t rectCount, 7443 const VkClearRect *pRects) { 7444 bool skip_call = false; 7445 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7446 std::unique_lock<std::mutex> lock(global_lock); 7447 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7448 if (pCB) { 7449 skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()"); 7450 // Warn if this is issued prior to Draw Cmd and clearing the entire attachment 7451 if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) && 7452 (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) { 7453 // TODO : commandBuffer should be srcObj 7454 // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass) 7455 // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must 7456 // call CmdClearAttachments 7457 // Otherwise this seems more like a performance warning. 7458 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 7459 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS", 7460 "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds." 
7461 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.", 7462 (uint64_t)(commandBuffer)); 7463 } 7464 skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments"); 7465 } 7466 7467 // Validate that attachment is in reference list of active subpass 7468 if (pCB->activeRenderPass) { 7469 const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo; 7470 const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass]; 7471 7472 for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) { 7473 const VkClearAttachment *attachment = &pAttachments[attachment_idx]; 7474 if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) { 7475 bool found = false; 7476 for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) { 7477 if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) { 7478 found = true; 7479 break; 7480 } 7481 } 7482 if (!found) { 7483 skip_call |= log_msg( 7484 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7485 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS", 7486 "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d", 7487 attachment->colorAttachment, pCB->activeSubpass); 7488 } 7489 } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { 7490 if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass 7491 (pSD->pDepthStencilAttachment->attachment == 7492 VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass 7493 7494 skip_call |= log_msg( 7495 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7496 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS", 7497 "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found " 7498 "in active subpass %d", 7499 attachment->colorAttachment, 7500 (pSD->pDepthStencilAttachment) ? 
                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
                        pCB->activeSubpass);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto img_node = getImageNode(dev_data, image);
    if (cb_node && img_node) {
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, img_node, "vkCmdClearColorImage");
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, img_node->mem, true, image);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                          const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto img_node = getImageNode(dev_data, image);
    if (cb_node && img_node) {
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, img_node, "vkCmdClearDepthStencilImage");
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, img_node->mem, true, image);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
                                                                   pRanges);
}
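
// Note on the CmdClearAttachments() performance warning above: clearing a whole attachment at the start of a
// render pass is better expressed through the attachment's loadOp, which lets the implementation fold the
// clear into the pass. A sketch of the recommended setup (illustrative only; format and layouts are example
// values):
//
//     VkAttachmentDescription color_attachment = {};
//     color_attachment.format = VK_FORMAT_B8G8R8A8_UNORM;
//     color_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
//     color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;   // clear happens on first use in the pass
//     color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
//     color_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     color_attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     // The clear color itself is supplied through VkRenderPassBeginInfo::pClearValues.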

VKAPI_ATTR void VKAPI_CALL
CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        // Update bindings between images and cmd buffer
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, src_img_node, "vkCmdResolveImage");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, dst_img_node, "vkCmdResolveImage");
        std::function<bool()> function = [=]() {
            return validate_memory_is_valid(dev_data, src_img_node->mem, "vkCmdResolveImage()", srcImage);
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            set_memory_valid(dev_data, dst_img_node->mem, true, dstImage);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}
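
// Event state is queue-dependent, so CmdSetEvent/CmdResetEvent above cannot finish validating at record
// time; the command buffer may later be submitted to any queue. They therefore bind the arguments that are
// known now and defer the queue:
//
//     // placeholder _1 is filled in with the actual VkQueue at vkQueueSubmit() time
//     std::function<bool(VkQueue)> eventUpdate =
//         std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
//     pCB->eventUpdates.push_back(eventUpdate);
//
// The submit path walks pCB->eventUpdates and runs each callback against the destination queue's
// eventToStageMap.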

static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                                   const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    bool skip = false;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub,
                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "You cannot transition the layout from %s when current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}

// Print readable FlagBits in FlagMask
static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        // Use an unsigned shift so testing bit 31 is well-defined
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1u << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}
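
// Example of the formatting string_VkAccessFlags() produces (bits are emitted in ascending bit order,
// joined with " | "):
//
//     string_VkAccessFlags(0)                           -> "[None]"
//     string_VkAccessFlags(VK_ACCESS_SHADER_READ_BIT |
//                          VK_ACCESS_TRANSFER_READ_BIT) -> "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT]"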

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set.
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
                             const char *type) {
    bool skip_call = false;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "%s AccessMask %d %s must contain at least one of access bits %d "
                                 "%s when layout is %s, unless the app has previously added a "
                                 "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "%s AccessMask %d %s must have required access bit %d %s %s when "
                                 "layout is %s, unless the app has previously added a barrier for "
                                 "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}

static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                        const VkImageLayout &layout, const char *type) {
    bool skip_call = false;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}
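
// Worked example of the layout/access-mask pairing enforced above (values taken from the cases in
// ValidateMaskBitsFromLayouts):
//
//     oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
//     srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT   -> OK, the required bit is present
//
//     newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
//     dstAccessMask = VK_ACCESS_SHADER_READ_BIT      -> OK, one of the optional bits is present
//     dstAccessMask = 0                              -> warning, unless an earlier barrier already
//                                                       covered this transition
//
//     VK_IMAGE_LAYOUT_GENERAL places no constraint on the mask.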

static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "%s: Barriers cannot be set during subpass %d with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = getImageNode(dev_data, mem_barrier->image);
        if (image_data) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                         "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both must be.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than or equal to the number of "
                                         "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
            }
            auto image_data = getImageNode(dev_data, mem_barrier->image);
            VkFormat format = VK_FORMAT_UNDEFINED;
            uint32_t arrayLayers = 0, mipLevels = 0;
            bool imageFound = false;
            if (image_data) {
                format = image_data->createInfo.format;
                arrayLayers = image_data->createInfo.arrayLayers;
                mipLevels = image_data->createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
                if (imageswap_data) {
                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
                    if (swapchain_data) {
                        format = swapchain_data->createInfo.imageFormat;
                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                if (vk_format_is_depth_and_stencil(format) &&
                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Image is a depth and stencil format and thus must "
" 7915 "have both VK_IMAGE_ASPECT_DEPTH_BIT and " 7916 "VK_IMAGE_ASPECT_STENCIL_BIT set.", 7917 funcName); 7918 } 7919 int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) 7920 ? 1 7921 : mem_barrier->subresourceRange.layerCount; 7922 if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) { 7923 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7924 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the " 7925 "baseArrayLayer (%d) and layerCount (%d) be less " 7926 "than or equal to the total number of layers (%d).", 7927 funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount, 7928 arrayLayers); 7929 } 7930 int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS) 7931 ? 1 7932 : mem_barrier->subresourceRange.levelCount; 7933 if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) { 7934 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7935 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel " 7936 "(%d) and levelCount (%d) be less than or equal to " 7937 "the total number of levels (%d).", 7938 funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount, 7939 mipLevels); 7940 } 7941 } 7942 } 7943 } 7944 for (uint32_t i = 0; i < bufferBarrierCount; ++i) { 7945 auto mem_barrier = &pBufferMemBarriers[i]; 7946 if (pCB->activeRenderPass) { 7947 skip_call |= 7948 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7949 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName); 7950 } 7951 if (!mem_barrier) 7952 continue; 7953 7954 // Validate buffer barrier queue family indices 7955 if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED && 7956 mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) || 7957 (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED && 7958 mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) { 7959 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7960 DRAWSTATE_INVALID_QUEUE_INDEX, "DS", 7961 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater " 7962 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.", 7963 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer), 7964 dev_data->phys_dev_properties.queue_family_properties.size()); 7965 } 7966 7967 auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer); 7968 if (buffer_node) { 7969 auto buffer_size = buffer_node->memSize; 7970 if (mem_barrier->offset >= buffer_size) { 7971 skip_call |= log_msg( 7972 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7973 DRAWSTATE_INVALID_BARRIER, "DS", 7974 "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".", 7975 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer), 7976 reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size)); 7977 } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) { 7978 
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS",
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                    " whose sum is greater than total size 0x%" PRIx64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
                    reinterpret_cast<const uint64_t &>(buffer_size));
            }
        }
    }
    return skip_call;
}
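
// Summary of the queueFamilyIndex rules ValidateBarriers() enforces, with a sketch of a conforming image
// barrier for a VK_SHARING_MODE_EXCLUSIVE image (illustrative only; 'image' stands for any exclusive-mode
// color image):
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.image = image;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; // both IGNORED: no ownership transfer
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//
// CONCURRENT images must use VK_QUEUE_FAMILY_IGNORED for both indices; EXCLUSIVE images must either use
// IGNORED for both or name two valid queue families (a real ownership transfer).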

bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = dev_data->eventMap.find(event);
            if (global_event_data == dev_data->eventMap.end()) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->second.stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_EVENT, "DS",
                             "Submitting cmdbuffer with call to vkCmdWaitEvents "
                             "using srcStageMask 0x%X which must be the bitwise "
                             "OR of the stageMask parameters used in calls to "
                             "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
                             "used with vkSetEvent but instead is 0x%X.",
                             sourceStageMask, stageMask);
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        auto firstEventIndex = pCB->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            pCB->waitedEvents.insert(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip_call |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
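
// Example of the srcStageMask check validateEventStageMask() performs at submit time: if the command
// buffer recorded
//
//     vkCmdSetEvent(cb, eventA, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
//     vkCmdSetEvent(cb, eventB, VK_PIPELINE_STAGE_TRANSFER_BIT);
//
// then a vkCmdWaitEvents() on {eventA, eventB} must pass
// sourceStageMask == VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
// optionally OR'd with VK_PIPELINE_STAGE_HOST_BIT when vkSetEvent() from the host is involved;
// anything else is reported as DRAWSTATE_INVALID_EVENT.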

VKAPI_ATTR void VKAPI_CALL
CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip_call |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
                        (uint64_t)(queryPool), slot);
        } else {
            pCB->activeQueries.erase(query);
        }
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}
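
// Query state is tracked per command buffer and, once submitted, per queue: queryToStateMap maps
// {queryPool, slot} to an available flag that setQueryState() flips at submit time. The lifecycle the
// surrounding checks expect is, in recording order (sketch; pool/buffer names are illustrative):
//
//     vkCmdResetQueryPool(cb, pool, 0, 1);   // marks the query unavailable
//     vkCmdBeginQuery(cb, pool, 0, 0);
//     // ... draws ...
//     vkCmdEndQuery(cb, pool, 0);            // becomes available once the submit executes
//     vkCmdCopyQueryPoolResults(cb, pool, 0, 1, dstBuffer, 0, stride, flags);
//
// validateQuery() then fails the copy at submit time if the query was never made available on that queue.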

VKAPI_ATTR void VKAPI_CALL
CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            std::function<bool(VkQueue)> queryUpdate =
                std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
            pCB->queryUpdates.push_back(queryUpdate);
        }
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}

bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data == dev_data->queueMap.end())
        return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        auto query_data = queue_data->second.queryToStateMap.find(query);
        bool fail = false;
        if (query_data != queue_data->second.queryToStateMap.end()) {
            if (!query_data->second) {
                fail = true;
            }
        } else {
            auto global_query_data = dev_data->queryToStateMap.find(query);
            if (global_query_data != dev_data->queryToStateMap.end()) {
                if (!global_query_data->second) {
                    fail = true;
                }
            } else {
                fail = true;
            }
        }
        if (fail) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
        }
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && dst_buff_node) {
        // Update bindings between buffer and cmd buffer
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdCopyQueryPoolResults");
        // Validate that DST buffer has correct usage flags set
        skip_call |= validateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, dst_buff_node->mem, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        std::function<bool(VkQueue)> queryUpdate =
            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
        cb_node->queryUpdates.push_back(queryUpdate);
        if (cb_node->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                                                                 dstOffset, stride, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                            const void *pValues) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
    }

    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
    auto pipeline_layout = getPipelineLayout(dev_data, layout);
    if (!pipeline_layout) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Pipeline Layout 0x%" PRIx64 " not found.",
                             (uint64_t)layout);
    } else {
        // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
        // contained in the pipeline ranges.
        // Build a {start, end} span list for ranges with matching stage flags.
        const auto &ranges = pipeline_layout->pushConstantRanges;
        struct span {
            uint32_t start;
            uint32_t end;
        };
        std::vector<span> spans;
        spans.reserve(ranges.size());
        for (const auto &iter : ranges) {
            if (iter.stageFlags == stageFlags) {
                spans.push_back({iter.offset, iter.offset + iter.size});
            }
        }
        if (spans.size() == 0) {
            // There were no ranges that matched the stageFlags.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                                 "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
                                 "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
                                 (uint32_t)stageFlags, (uint64_t)layout);
        } else {
            // Sort span list by start value.
            struct comparer {
                bool operator()(struct span i, struct span j) { return i.start < j.start; }
            } my_comparer;
            std::sort(spans.begin(), spans.end(), my_comparer);

            // Examine two spans at a time.
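            // Worked example of the coalescing below: matching {offset, size} ranges of {0, 16},
            // {8, 24} and {40, 8} become spans {0, 16}, {8, 32}, {40, 48}; the first two touch, so
            // they coalesce to {0, 32}, leaving {0, 32} and {40, 48}. A push of offset = 4,
            // size = 20 is then accepted because [4, 24) lies inside [0, 32).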
            std::vector<span>::iterator current = spans.begin();
            std::vector<span>::iterator next = current + 1;
            while (next != spans.end()) {
                if (current->end < next->start) {
                    // There is a gap; cannot coalesce. Move to the next two spans.
                    ++current;
                    ++next;
                } else {
                    // Coalesce the two spans. The start of the next span
                    // is within the current span, so pick the larger of
                    // the end values to extend the current span.
                    // Then delete the next span and set next to the span after it.
                    current->end = max(current->end, next->end);
                    next = spans.erase(next);
                }
            }

            // Now we can check if the incoming range is within any of the spans.
            bool contained_in_a_range = false;
            for (uint32_t i = 0; i < spans.size(); ++i) {
                if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
                    contained_in_a_range = true;
                    break;
                }
            }
            if (!contained_in_a_range) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                                     "vkCmdPushConstants() Push constant range [%d, %d) "
                                     "with stageFlags = 0x%" PRIx32 " "
                                     "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
                                     offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
            }
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

VKAPI_ATTR void VKAPI_CALL
CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
    bool skip_call = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, *image_view);
                if (ivci != nullptr) {
                    // Guard against an unknown image before dereferencing its create info
                    auto img_node = getImageNode(dev_data, ivci->image);
                    if (img_node != nullptr) {
                        const VkImageCreateInfo *ici = &img_node->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
                                                 "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                                 "IMAGE_USAGE flags (%s).",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                        }
                    }
                }
            }
        }
    }
    return skip_call;
}

// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip_call = false;

    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
    if (rp_node) {
        const VkRenderPassCreateInfo *rpci = rp_node->pCreateInfo;
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, image_views[i]);
                if (ivci == nullptr) { // Unknown view; skip the per-attachment checks
                    continue;
                }
                if (ivci->format != rpci->pAttachments[i].format) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                        "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
                        "the format of %s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
                        i, string_VkFormat(ivci->format), string_VkFormat(rpci->pAttachments[i].format),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
                }
                const VkImageCreateInfo *ici = &getImageNode(dev_data, ivci->image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                        "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
                        i, string_VkSampleCountFlagBits(ici->samples),
                        string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
                }
                // Verify that view only has a single mip level
                if (ivci->subresourceRange.levelCount != 1) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                                         i, ivci->subresourceRange.levelCount);
                }
                const uint32_t mip_level = ivci->subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci->subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
                        "than the corresponding framebuffer dimensions. Attachment dimensions must be at least as large. "
                        "Here are the respective dimensions for attachment #%u, framebuffer:\n"
                        "width: %u, %u\n"
                        "height: %u, %u\n"
                        "layerCount: %u, %u\n",
                        i, ivci->subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                        pCreateInfo->height, ivci->subresourceRange.layerCount, pCreateInfo->layers);
                }
                if (((ivci->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci->components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
                        "r swizzle = %s\n"
                        "g swizzle = %s\n"
                        "b swizzle = %s\n"
                        "a swizzle = %s\n",
                        i, string_VkComponentSwizzle(ivci->components.r), string_VkComponentSwizzle(ivci->components.g),
                        string_VkComponentSwizzle(ivci->components.b), string_VkComponentSwizzle(ivci->components.a));
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
            // Verify color attachments:
            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
            }
        }
    } else {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
    }
    // Verify FB dimensions are within physical device limits
    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
" 8510 "Here are the respective dimensions: requested, device max:\n" 8511 "width: %u, %u\n" 8512 "height: %u, %u\n" 8513 "layerCount: %u, %u\n", 8514 pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth, 8515 pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight, 8516 pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers); 8517 } 8518 return skip_call; 8519} 8520 8521// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object 8522// Return true if an error is encountered and callback returns true to skip call down chain 8523// false indicates that call down chain should proceed 8524static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) { 8525 // TODO : Verify that renderPass FB is created with is compatible with FB 8526 bool skip_call = false; 8527 skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo); 8528 return skip_call; 8529} 8530 8531// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object 8532static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) { 8533 // Shadow create info and store in map 8534 std::unique_ptr<FRAMEBUFFER_NODE> fb_node( 8535 new FRAMEBUFFER_NODE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->pCreateInfo)); 8536 8537 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { 8538 VkImageView view = pCreateInfo->pAttachments[i]; 8539 auto view_data = getImageViewData(dev_data, view); 8540 if (!view_data) { 8541 continue; 8542 } 8543 MT_FB_ATTACHMENT_INFO fb_info; 8544 fb_info.mem = getImageNode(dev_data, view_data->image)->mem; 8545 fb_info.image = view_data->image; 8546 fb_node->attachments.push_back(fb_info); 8547 } 8548 dev_data->frameBufferMap[fb] = std::move(fb_node); 8549} 8550 8551VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, 8552 const VkAllocationCallbacks *pAllocator, 8553 VkFramebuffer *pFramebuffer) { 8554 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 8555 std::unique_lock<std::mutex> lock(global_lock); 8556 bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo); 8557 lock.unlock(); 8558 8559 if (skip_call) 8560 return VK_ERROR_VALIDATION_FAILED_EXT; 8561 8562 VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer); 8563 8564 if (VK_SUCCESS == result) { 8565 lock.lock(); 8566 PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer); 8567 lock.unlock(); 8568 } 8569 return result; 8570} 8571 8572static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node, 8573 std::unordered_set<uint32_t> &processed_nodes) { 8574 // If we have already checked this node we have not found a dependency path so return false. 8575 if (processed_nodes.count(index)) 8576 return false; 8577 processed_nodes.insert(index); 8578 const DAGNode &node = subpass_to_node[index]; 8579 // Look for a dependency path. If one exists return true else recurse on the previous nodes. 
8580     if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8581         for (auto elem : node.prev) {
8582             if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
8583                 return true;
8584         }
8585     } else {
8586         return true;
8587     }
8588     return false;
8589 }
8590
8591 static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
8592                                   const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
8593     bool result = true;
8594     // Loop through all subpasses that share the same attachment and make sure a dependency exists
8595     for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8596         if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
8597             continue;
8598         const DAGNode &node = subpass_to_node[subpass];
8599         // Check for a specified dependency between the two nodes. If one exists we are done.
8600         auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8601         auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8602         if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8603             // If no dependency exists, an implicit dependency still might. If not, throw an error.
8604             std::unordered_set<uint32_t> processed_nodes;
8605             if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8606                   FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
8607                 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8608                                      __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8609                                      "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8610                                      dependent_subpasses[k]);
8611                 result = false;
8612             }
8613         }
8614     }
8615     return result;
8616 }
8617
8618 static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8619                            const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
8620     const DAGNode &node = subpass_to_node[index];
8621     // If this node writes to the attachment return true as next nodes need to preserve the attachment.
8622     const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8623     for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8624         if (attachment == subpass.pColorAttachments[j].attachment)
8625             return true;
8626     }
8627     if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8628         if (attachment == subpass.pDepthStencilAttachment->attachment)
8629             return true;
8630     }
8631     bool result = false;
8632     // Loop through previous nodes and see if any of them write to the attachment.
8633     for (auto elem : node.prev) {
8634         result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
8635     }
8636     // If the attachment was written to by a previous node, then this node needs to preserve it.
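    // Worked example (hypothetical render pass): subpass 0 writes color attachment 2, subpass 2 reads it
    // as an input attachment, and subpass 1, which sits between them in the dependency chain 0 -> 1 -> 2,
    // neither reads nor writes it. The recursion above then yields result == true for subpass 1 at depth 1,
    // so the check below requires attachment 2 to be listed in subpass 1's pPreserveAttachments.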
8637     if (result && depth > 0) {
8638         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8639         bool has_preserved = false;
8640         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8641             if (subpass.pPreserveAttachments[j] == attachment) {
8642                 has_preserved = true;
8643                 break;
8644             }
8645         }
8646         if (!has_preserved) {
8647             skip_call |=
8648                 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8649                         DRAWSTATE_INVALID_RENDERPASS, "DS",
8650                         "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8651         }
8652     }
8653     return result;
8654 }
8655
8656 template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8657     // Half-open interval test: [offset1, offset1 + size1) and [offset2, offset2 + size2) overlap iff each starts before the other ends.
8658     return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
8659 }
8660
8661 bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8662     return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8663             isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8664 }
8665
8666 static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
8667                                  RENDER_PASS_NODE const * renderPass) {
8668     bool skip_call = false;
8669     const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
8670     const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
8671     auto const & subpass_to_node = renderPass->subpassToNode;
8672     std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8673     std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8674     std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8675     // Find overlapping attachments
8676     for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8677         for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8678             VkImageView viewi = pFramebufferInfo->pAttachments[i];
8679             VkImageView viewj = pFramebufferInfo->pAttachments[j];
8680             if (viewi == viewj) {
8681                 overlapping_attachments[i].push_back(j);
8682                 overlapping_attachments[j].push_back(i);
8683                 continue;
8684             }
8685             auto view_data_i = getImageViewData(my_data, viewi);
8686             auto view_data_j = getImageViewData(my_data, viewj);
8687             if (!view_data_i || !view_data_j) {
8688                 continue;
8689             }
8690             if (view_data_i->image == view_data_j->image &&
8691                 isRegionOverlapping(view_data_i->subresourceRange, view_data_j->subresourceRange)) {
8692                 overlapping_attachments[i].push_back(j);
8693                 overlapping_attachments[j].push_back(i);
8694                 continue;
8695             }
8696             auto image_data_i = getImageNode(my_data, view_data_i->image);
8697             auto image_data_j = getImageNode(my_data, view_data_j->image);
8698             if (!image_data_i || !image_data_j) {
8699                 continue;
8700             }
8701             if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
8702                                                                              image_data_j->memOffset, image_data_j->memSize)) {
8703                 overlapping_attachments[i].push_back(j);
8704                 overlapping_attachments[j].push_back(i);
8705             }
8706         }
8707     }
8708     for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8709         uint32_t attachment = i;
8710         for (auto other_attachment : overlapping_attachments[i]) {
8711             if (!(pCreateInfo->pAttachments[attachment].flags &
VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) { 8712 skip_call |= 8713 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8714 DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't " 8715 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.", 8716 attachment, other_attachment); 8717 } 8718 if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) { 8719 skip_call |= 8720 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8721 DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't " 8722 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.", 8723 other_attachment, attachment); 8724 } 8725 } 8726 } 8727 // Find for each attachment the subpasses that use them. 8728 unordered_set<uint32_t> attachmentIndices; 8729 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8730 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8731 attachmentIndices.clear(); 8732 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 8733 uint32_t attachment = subpass.pInputAttachments[j].attachment; 8734 if (attachment == VK_ATTACHMENT_UNUSED) 8735 continue; 8736 input_attachment_to_subpass[attachment].push_back(i); 8737 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 8738 input_attachment_to_subpass[overlapping_attachment].push_back(i); 8739 } 8740 } 8741 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 8742 uint32_t attachment = subpass.pColorAttachments[j].attachment; 8743 if (attachment == VK_ATTACHMENT_UNUSED) 8744 continue; 8745 output_attachment_to_subpass[attachment].push_back(i); 8746 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 8747 output_attachment_to_subpass[overlapping_attachment].push_back(i); 8748 } 8749 attachmentIndices.insert(attachment); 8750 } 8751 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 8752 uint32_t attachment = subpass.pDepthStencilAttachment->attachment; 8753 output_attachment_to_subpass[attachment].push_back(i); 8754 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 8755 output_attachment_to_subpass[overlapping_attachment].push_back(i); 8756 } 8757 8758 if (attachmentIndices.count(attachment)) { 8759 skip_call |= 8760 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 8761 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 8762 "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", 8763 attachment, i); 8764 } 8765 } 8766 } 8767 // If there is a dependency needed make sure one exists 8768 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8769 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8770 // If the attachment is an input then all subpasses that output must have a dependency relationship 8771 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 8772 uint32_t attachment = subpass.pInputAttachments[j].attachment; 8773 if (attachment == VK_ATTACHMENT_UNUSED) 8774 continue; 8775 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8776 } 8777 // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship 8778 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 8779 uint32_t attachment = 
subpass.pColorAttachments[j].attachment; 8780 if (attachment == VK_ATTACHMENT_UNUSED) 8781 continue; 8782 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8783 CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8784 } 8785 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 8786 const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment; 8787 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8788 CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8789 } 8790 } 8791 // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was 8792 // written. 8793 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8794 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8795 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 8796 CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call); 8797 } 8798 } 8799 return skip_call; 8800} 8801// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the 8802// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that 8803// READ_ONLY layout attachments don't have CLEAR as their loadOp. 8804static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout, 8805 const uint32_t attachment, 8806 const VkAttachmentDescription &attachment_description) { 8807 bool skip_call = false; 8808 // Verify that initial loadOp on READ_ONLY attachments is not CLEAR 8809 if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { 8810 if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || 8811 (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) { 8812 skip_call |= 8813 log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 8814 VkDebugReportObjectTypeEXT(0), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 8815 "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout)); 8816 } 8817 } 8818 return skip_call; 8819} 8820 8821static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) { 8822 bool skip = false; 8823 8824 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8825 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8826 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 8827 auto attach_index = subpass.pInputAttachments[j].attachment; 8828 if (attach_index == VK_ATTACHMENT_UNUSED) 8829 continue; 8830 8831 if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL && 8832 subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { 8833 if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) { 8834 // TODO: Verify Valid Use in spec. 
I believe this is allowed (valid) but may not be optimal performance 8835 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 8836 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 8837 "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL."); 8838 } else { 8839 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8840 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 8841 "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.", 8842 string_VkImageLayout(subpass.pInputAttachments[j].layout)); 8843 } 8844 } 8845 skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout, attach_index, 8846 pCreateInfo->pAttachments[attach_index]); 8847 } 8848 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 8849 auto attach_index = subpass.pColorAttachments[j].attachment; 8850 if (attach_index == VK_ATTACHMENT_UNUSED) 8851 continue; 8852 8853 if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { 8854 if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) { 8855 // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance 8856 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 8857 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 8858 "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL."); 8859 } else { 8860 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8861 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 8862 "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.", 8863 string_VkImageLayout(subpass.pColorAttachments[j].layout)); 8864 } 8865 } 8866 skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout, attach_index, 8867 pCreateInfo->pAttachments[attach_index]); 8868 } 8869 if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) { 8870 if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { 8871 if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) { 8872 // TODO: Verify Valid Use in spec. 
I believe this is allowed (valid) but may not be optimal performance
8873                     skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8874                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8875                                     "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
8876                 } else {
8877                     skip |=
8878                         log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8879                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8880                                 "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
8881                                 string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
8882                 }
8883             }
8884             auto attach_index = subpass.pDepthStencilAttachment->attachment;
8885             skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
8886                                                           attach_index, pCreateInfo->pAttachments[attach_index]);
8887         }
8888     }
8889     return skip;
8890 }
8891
8892 static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8893                           std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
8894     bool skip_call = false;
8895     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8896         DAGNode &subpass_node = subpass_to_node[i];
8897         subpass_node.pass = i;
8898     }
8899     for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8900         const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8901         if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
8902             dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8903             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8904                                  DRAWSTATE_INVALID_RENDERPASS, "DS",
8905                                  "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
8906         } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8907             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8908                                  DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
8909         } else if (dependency.srcSubpass == dependency.dstSubpass) {
8910             has_self_dependency[dependency.srcSubpass] = true;
8911         }
8912         if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8913             subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8914         }
8915         if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
8916             subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8917         }
8918     }
8919     return skip_call;
8920 }
8921
8922
8923 VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8924                                                   const VkAllocationCallbacks *pAllocator,
8925                                                   VkShaderModule *pShaderModule) {
8926     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8927     bool skip_call = false;
8928
8929     /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
8930     spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
8931     spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
8932     spv_diagnostic diag = nullptr;
8933
8934     auto result = spvValidate(ctx, &binary, &diag);
8935     if (result != SPV_SUCCESS) {
8936         skip_call |= log_msg(my_data->report_data,
8937                              result == SPV_WARNING ?
VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT, 8938 VkDebugReportObjectTypeEXT(0), 0, 8939 __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s", 8940 diag && diag->error ? diag->error : "(no error text)"); 8941 } 8942 8943 spvDiagnosticDestroy(diag); 8944 spvContextDestroy(ctx); 8945 8946 if (skip_call) 8947 return VK_ERROR_VALIDATION_FAILED_EXT; 8948 8949 VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule); 8950 8951 if (res == VK_SUCCESS) { 8952 std::lock_guard<std::mutex> lock(global_lock); 8953 my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo)); 8954 } 8955 return res; 8956} 8957 8958static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) { 8959 bool skip_call = false; 8960 if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) { 8961 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8962 DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS", 8963 "CreateRenderPass: %s attachment %d cannot be greater than the total number of attachments %d.", 8964 type, attachment, attachment_count); 8965 } 8966 return skip_call; 8967} 8968 8969static bool IsPowerOfTwo(unsigned x) { 8970 return x && !(x & (x-1)); 8971} 8972 8973static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) { 8974 bool skip_call = false; 8975 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8976 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8977 if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) { 8978 skip_call |= 8979 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8980 DRAWSTATE_INVALID_RENDERPASS, "DS", 8981 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i); 8982 } 8983 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { 8984 uint32_t attachment = subpass.pPreserveAttachments[j]; 8985 if (attachment == VK_ATTACHMENT_UNUSED) { 8986 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 8987 __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS", 8988 "CreateRenderPass: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j); 8989 } else { 8990 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve"); 8991 } 8992 } 8993 8994 auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of( 8995 subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount, 8996 [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; }); 8997 8998 unsigned sample_count = 0; 8999 9000 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 9001 uint32_t attachment; 9002 if (subpass.pResolveAttachments) { 9003 attachment = subpass.pResolveAttachments[j].attachment; 9004 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve"); 9005 9006 if (!skip_call && attachment != VK_ATTACHMENT_UNUSED && 9007 pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) { 9008 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 9009 __LINE__, DRAWSTATE_INVALID_RENDERPASS, 
"DS", 9010 "CreateRenderPass: Subpass %u requests multisample resolve into attachment %u, " 9011 "which must have VK_SAMPLE_COUNT_1_BIT but has %s", 9012 i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples)); 9013 } 9014 } 9015 attachment = subpass.pColorAttachments[j].attachment; 9016 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color"); 9017 9018 if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) { 9019 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples; 9020 9021 if (subpass_performs_resolve && 9022 pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) { 9023 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 9024 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 9025 "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u " 9026 "which has VK_SAMPLE_COUNT_1_BIT", 9027 i, attachment); 9028 } 9029 } 9030 } 9031 9032 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 9033 uint32_t attachment = subpass.pDepthStencilAttachment->attachment; 9034 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil"); 9035 9036 if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) { 9037 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples; 9038 } 9039 } 9040 9041 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 9042 uint32_t attachment = subpass.pInputAttachments[j].attachment; 9043 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input"); 9044 } 9045 9046 if (sample_count && !IsPowerOfTwo(sample_count)) { 9047 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 9048 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 9049 "CreateRenderPass: Subpass %u attempts to render to " 9050 "attachments with inconsistent sample counts", 9051 i); 9052 } 9053 } 9054 return skip_call; 9055} 9056 9057VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, 9058 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { 9059 bool skip_call = false; 9060 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9061 9062 std::unique_lock<std::mutex> lock(global_lock); 9063 9064 skip_call |= ValidateLayouts(dev_data, device, pCreateInfo); 9065 // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with 9066 // ValidateLayouts. 
9067     skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
9068     lock.unlock();
9069
9070     if (skip_call) {
9071         return VK_ERROR_VALIDATION_FAILED_EXT;
9072     }
9073
9074     VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9075
9076     if (VK_SUCCESS == result) {
9077         lock.lock();
9078
9079         std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9080         std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9081         skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9082
9083         // Shadow create info and store in map
9084         VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9085         if (pCreateInfo->pAttachments) {
9086             localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9087             memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9088                    localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9089         }
9090         if (pCreateInfo->pSubpasses) {
9091             localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9092             memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9093
9094             for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9095                 VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9096                 const uint32_t attachmentCount = subpass->inputAttachmentCount +
9097                                                  subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9098                                                  ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9099                 VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9100
9101                 memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9102                 subpass->pInputAttachments = attachments;
9103                 attachments += subpass->inputAttachmentCount;
9104
9105                 memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9106                 subpass->pColorAttachments = attachments;
9107                 attachments += subpass->colorAttachmentCount;
9108
9109                 if (subpass->pResolveAttachments) {
9110                     memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9111                     subpass->pResolveAttachments = attachments;
9112                     attachments += subpass->colorAttachmentCount;
9113                 }
9114
9115                 if (subpass->pDepthStencilAttachment) {
9116                     memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9117                     subpass->pDepthStencilAttachment = attachments;
9118                     attachments += 1;
9119                 }
9120
9121                 memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount); // preserve entries are bare uint32_t indices; copying sizeof(attachments[0]) per element would read past the end of the source array
9122                 subpass->pPreserveAttachments = &attachments->attachment;
9123             }
9124         }
9125         if (pCreateInfo->pDependencies) {
9126             localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9127             memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9128                    localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9129         }
9130
9131         auto render_pass = new RENDER_PASS_NODE(localRPCI);
9132         render_pass->renderPass = *pRenderPass;
9133         render_pass->hasSelfDependency = has_self_dependency;
9134         render_pass->subpassToNode = subpass_to_node;
9135 #if MTMERGESOURCE
9136         // MTMTODO : Merge with code from above to eliminate duplication
9137         for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9138             VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
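            // Record this attachment's load/store ops; CmdBeginRenderPass/CmdEndRenderPass later feed them
            // through FormatSpecificLoadAndStoreOpSettings to decide whether the attachment's backing memory
            // is marked valid (CLEAR at begin, STORE at end), marked invalid (DONT_CARE), or must be
            // validated before use (LOAD).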
9139             MT_PASS_ATTACHMENT_INFO pass_info;
9140             pass_info.load_op = desc.loadOp;
9141             pass_info.store_op = desc.storeOp;
9142             pass_info.stencil_load_op = desc.stencilLoadOp;
9143             pass_info.stencil_store_op = desc.stencilStoreOp;
9144             pass_info.attachment = i;
9145             render_pass->attachments.push_back(pass_info);
9146         }
9147         // TODO: Maybe fill list and then copy instead of locking
9148         std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
9149         std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
9150         for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9151             const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9152             for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9153                 uint32_t attachment = subpass.pColorAttachments[j].attachment;
9154                 if (!attachment_first_read.count(attachment)) {
9155                     attachment_first_read.insert(std::make_pair(attachment, false));
9156                     attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9157                 }
9158             }
9159             if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9160                 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9161                 if (!attachment_first_read.count(attachment)) {
9162                     attachment_first_read.insert(std::make_pair(attachment, false));
9163                     attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9164                 }
9165             }
9166             for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9167                 uint32_t attachment = subpass.pInputAttachments[j].attachment;
9168                 if (!attachment_first_read.count(attachment)) {
9169                     attachment_first_read.insert(std::make_pair(attachment, true));
9170                     attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9171                 }
9172             }
9173         }
9174 #endif
9175         dev_data->renderPassMap[*pRenderPass] = render_pass;
9176     }
9177     return result;
9178 }
9179
9180 // Free the renderpass shadow
9181 static void deleteRenderPasses(layer_data *my_data) {
9182     for (auto renderPass : my_data->renderPassMap) {
9183         const VkRenderPassCreateInfo *pRenderPassInfo = renderPass.second->pCreateInfo;
9184         delete[] pRenderPassInfo->pAttachments;
9185         if (pRenderPassInfo->pSubpasses) {
9186             for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9187                 // Attachments are all allocated in a block, so just need to
9188                 // find the first non-null one to delete
9189                 if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9190                     delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9191                 } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9192                     delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9193                 } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9194                     delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9195                 } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9196                     delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9197                 }
9198             }
9199             delete[] pRenderPassInfo->pSubpasses;
9200         }
9201         delete[] pRenderPassInfo->pDependencies;
9202         delete pRenderPassInfo;
9203         delete renderPass.second;
9204     }
9205     my_data->renderPassMap.clear();
9206 }
9207
9208 static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9209     bool skip_call = false;
9210     const VkRenderPassCreateInfo *pRenderPassInfo =
dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo; 9211 const safe_VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo; 9212 if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) { 9213 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9214 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer " 9215 "with a different number of attachments."); 9216 } 9217 for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { 9218 const VkImageView &image_view = framebufferInfo.pAttachments[i]; 9219 auto image_data = getImageViewData(dev_data, image_view); 9220 assert(image_data); 9221 const VkImage &image = image_data->image; 9222 const VkImageSubresourceRange &subRange = image_data->subresourceRange; 9223 IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout, 9224 pRenderPassInfo->pAttachments[i].initialLayout}; 9225 // TODO: Do not iterate over every possibility - consolidate where possible 9226 for (uint32_t j = 0; j < subRange.levelCount; j++) { 9227 uint32_t level = subRange.baseMipLevel + j; 9228 for (uint32_t k = 0; k < subRange.layerCount; k++) { 9229 uint32_t layer = subRange.baseArrayLayer + k; 9230 VkImageSubresource sub = {subRange.aspectMask, level, layer}; 9231 IMAGE_CMD_BUF_LAYOUT_NODE node; 9232 if (!FindLayout(pCB, image, sub, node)) { 9233 SetLayout(pCB, image, sub, newNode); 9234 continue; 9235 } 9236 if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED && 9237 newNode.layout != node.layout) { 9238 skip_call |= 9239 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9240 DRAWSTATE_INVALID_RENDERPASS, "DS", 9241 "You cannot start a render pass using attachment %u " 9242 "where the render pass initial layout is %s and the previous " 9243 "known layout of the attachment is %s. 
The layouts must match, or " 9244 "the render pass initial layout for the attachment must be " 9245 "VK_IMAGE_LAYOUT_UNDEFINED", 9246 i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout)); 9247 } 9248 } 9249 } 9250 } 9251 return skip_call; 9252} 9253 9254static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, 9255 FRAMEBUFFER_NODE *pFramebuffer, 9256 VkAttachmentReference ref) 9257{ 9258 if (ref.attachment != VK_ATTACHMENT_UNUSED) { 9259 auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment]; 9260 SetLayout(dev_data, pCB, image_view, ref.layout); 9261 } 9262} 9263 9264static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin, 9265 const int subpass_index) { 9266 auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass); 9267 if (!renderPass) 9268 return; 9269 9270 auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer); 9271 if (!framebuffer) 9272 return; 9273 9274 const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index]; 9275 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 9276 TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]); 9277 } 9278 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 9279 TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]); 9280 } 9281 if (subpass.pDepthStencilAttachment) { 9282 TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment); 9283 } 9284} 9285 9286static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) { 9287 bool skip_call = false; 9288 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { 9289 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9290 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.", 9291 cmd_name.c_str()); 9292 } 9293 return skip_call; 9294} 9295 9296static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) { 9297 auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass); 9298 if (!renderPass) 9299 return; 9300 9301 const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo; 9302 auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer); 9303 if (!framebuffer) 9304 return; 9305 9306 for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { 9307 auto image_view = framebuffer->createInfo.pAttachments[i]; 9308 SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout); 9309 } 9310} 9311 9312static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) { 9313 bool skip_call = false; 9314 const safe_VkFramebufferCreateInfo *pFramebufferInfo = &getFramebuffer(my_data, pRenderPassBegin->framebuffer)->createInfo; 9315 if (pRenderPassBegin->renderArea.offset.x < 0 || 9316 (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width || 9317 pRenderPassBegin->renderArea.offset.y < 0 || 9318 (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) { 9319 skip_call |= static_cast<bool>(log_msg( 9320 my_data->report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9321             DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9322             "Cannot execute a render pass with renderArea not within the bounds of the "
9323             "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9324             "height %d.",
9325             pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9326             pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9327     }
9328     return skip_call;
9329 }
9330
9331 // If this is a stencil-only format, it is the stencil[Load|Store]Op flag that must be checked; for depth and color attachments
9332 // it is the [load|store]Op flag that must be checked.
9333 // TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
9334 template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
9335     if (color_depth_op != op && stencil_op != op) {
9336         return false;
9337     }
9338     bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
9339     bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
9340
9341     return (((check_color_depth_load_op == true) && (color_depth_op == op)) ||
9342             ((check_stencil_load_op == true) && (stencil_op == op)));
9343 }
9344
9345 VKAPI_ATTR void VKAPI_CALL
9346 CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9347     bool skip_call = false;
9348     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9349     std::unique_lock<std::mutex> lock(global_lock);
9350     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9351     auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
9352     auto framebuffer = pRenderPassBegin ?
getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr; 9353 if (pCB) { 9354 if (renderPass) { 9355 uint32_t clear_op_count = 0; 9356 pCB->activeFramebuffer = pRenderPassBegin->framebuffer; 9357 for (size_t i = 0; i < renderPass->attachments.size(); ++i) { 9358 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i]; 9359 VkFormat format = renderPass->pCreateInfo->pAttachments[renderPass->attachments[i].attachment].format; 9360 if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op, 9361 renderPass->attachments[i].stencil_load_op, 9362 VK_ATTACHMENT_LOAD_OP_CLEAR)) { 9363 ++clear_op_count; 9364 std::function<bool()> function = [=]() { 9365 set_memory_valid(dev_data, fb_info.mem, true, fb_info.image); 9366 return false; 9367 }; 9368 pCB->validate_functions.push_back(function); 9369 } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op, 9370 renderPass->attachments[i].stencil_load_op, 9371 VK_ATTACHMENT_LOAD_OP_DONT_CARE)) { 9372 std::function<bool()> function = [=]() { 9373 set_memory_valid(dev_data, fb_info.mem, false, fb_info.image); 9374 return false; 9375 }; 9376 pCB->validate_functions.push_back(function); 9377 } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op, 9378 renderPass->attachments[i].stencil_load_op, 9379 VK_ATTACHMENT_LOAD_OP_LOAD)) { 9380 std::function<bool()> function = [=]() { 9381 return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image); 9382 }; 9383 pCB->validate_functions.push_back(function); 9384 } 9385 if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) { 9386 std::function<bool()> function = [=]() { 9387 return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image); 9388 }; 9389 pCB->validate_functions.push_back(function); 9390 } 9391 } 9392 if (clear_op_count > pRenderPassBegin->clearValueCount) { 9393 skip_call |= log_msg( 9394 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, 9395 reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS", 9396 "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but the actual number " 9397 "of attachments in renderPass 0x%" PRIx64 " that use VK_ATTACHMENT_LOAD_OP_CLEAR is %u. 
The clearValueCount " 9398 "must therefore be greater than or equal to %u.", 9399 pRenderPassBegin->clearValueCount, reinterpret_cast<uint64_t &>(renderPass), clear_op_count, clear_op_count); 9400 } 9401 skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin); 9402 skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, pCB, pRenderPassBegin); 9403 skip_call |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass"); 9404 skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass); 9405 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass"); 9406 skip_call |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()"); 9407 pCB->activeRenderPass = renderPass; 9408 // This is a shallow copy as that is all that is needed for now 9409 pCB->activeRenderPassBeginInfo = *pRenderPassBegin; 9410 pCB->activeSubpass = 0; 9411 pCB->activeSubpassContents = contents; 9412 pCB->framebuffers.insert(pRenderPassBegin->framebuffer); 9413 // Connect this framebuffer to this cmdBuffer 9414 framebuffer->cb_bindings.insert(pCB); 9415 9416 // transition attachments to the correct layouts for the first subpass 9417 TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass); 9418 } else { 9419 skip_call |= 9420 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9421 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()"); 9422 } 9423 } 9424 lock.unlock(); 9425 if (!skip_call) { 9426 dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); 9427 } 9428} 9429 9430VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { 9431 bool skip_call = false; 9432 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9433 std::unique_lock<std::mutex> lock(global_lock); 9434 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9435 if (pCB) { 9436 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass"); 9437 skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()"); 9438 pCB->activeSubpass++; 9439 pCB->activeSubpassContents = contents; 9440 TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass); 9441 skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass"); 9442 } 9443 lock.unlock(); 9444 if (!skip_call) 9445 dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents); 9446} 9447 9448VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) { 9449 bool skip_call = false; 9450 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9451 std::unique_lock<std::mutex> lock(global_lock); 9452 auto pCB = getCBNode(dev_data, commandBuffer); 9453 if (pCB) { 9454 RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass; 9455 auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer); 9456 if (pRPNode) { 9457 for (size_t i = 0; i < pRPNode->attachments.size(); ++i) { 9458 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i]; 9459 VkFormat format = pRPNode->pCreateInfo->pAttachments[pRPNode->attachments[i].attachment].format; 9460 if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op, 9461 pRPNode->attachments[i].stencil_store_op, VK_ATTACHMENT_STORE_OP_STORE)) { 9462 std::function<bool()> function = [=]() { 9463 set_memory_valid(dev_data, 
fb_info.mem, true, fb_info.image);
9464                         return false;
9465                     };
9466                     pCB->validate_functions.push_back(function);
9467                 } else if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9468                                                                 pRPNode->attachments[i].stencil_store_op,
9469                                                                 VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
9470                     std::function<bool()> function = [=]() {
9471                         set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9472                         return false;
9473                     };
9474                     pCB->validate_functions.push_back(function);
9475                 }
9476             }
9477         }
9478         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9479         skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9480         skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9481         TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
9482         pCB->activeRenderPass = nullptr;
9483         pCB->activeSubpass = 0;
9484         pCB->activeFramebuffer = VK_NULL_HANDLE;
9485     }
9486     lock.unlock();
9487     if (!skip_call)
9488         dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9489 }
9490
9491 static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
9492                                         uint32_t secondaryAttach, const char *msg) {
9493     return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9494                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9495                    "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
9496                    "that is not compatible with the Primary Cmd Buffer current render pass. "
9497                    "Attachment %u is not compatible with %u: %s",
9498                    reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
9499 }
9500
9501 static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
9502                                             VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
9503                                             VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
9504                                             uint32_t secondaryAttach, bool is_multi) {
9505     bool skip_call = false;
9506     if (primaryPassCI->attachmentCount <= primaryAttach) {
9507         primaryAttach = VK_ATTACHMENT_UNUSED;
9508     }
9509     if (secondaryPassCI->attachmentCount <= secondaryAttach) {
9510         secondaryAttach = VK_ATTACHMENT_UNUSED;
9511     }
9512     if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9513         return skip_call;
9514     }
9515     if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9516         skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
9517                                                  "The first is unused while the second is not.");
9518         return skip_call;
9519     }
9520     if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9521         skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
9522                                                  "The second is unused while the first is not.");
9523         return skip_call;
9524     }
9525     if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
9526         skip_call |=
9527             logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
9528     }
9529     if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
9530         skip_call |=
9531             logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
9532     }
9533     if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags !=
secondaryPassCI->pAttachments[secondaryAttach].flags) { 9534 skip_call |= 9535 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags."); 9536 } 9537 return skip_call; 9538} 9539 9540static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, 9541 VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer, 9542 VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) { 9543 bool skip_call = false; 9544 const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass]; 9545 const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass]; 9546 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount); 9547 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) { 9548 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED; 9549 if (i < primary_desc.inputAttachmentCount) { 9550 primary_input_attach = primary_desc.pInputAttachments[i].attachment; 9551 } 9552 if (i < secondary_desc.inputAttachmentCount) { 9553 secondary_input_attach = secondary_desc.pInputAttachments[i].attachment; 9554 } 9555 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer, 9556 secondaryPassCI, secondary_input_attach, is_multi); 9557 } 9558 uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount); 9559 for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) { 9560 uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED; 9561 if (i < primary_desc.colorAttachmentCount) { 9562 primary_color_attach = primary_desc.pColorAttachments[i].attachment; 9563 } 9564 if (i < secondary_desc.colorAttachmentCount) { 9565 secondary_color_attach = secondary_desc.pColorAttachments[i].attachment; 9566 } 9567 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer, 9568 secondaryPassCI, secondary_color_attach, is_multi); 9569 uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED; 9570 if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) { 9571 primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment; 9572 } 9573 if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) { 9574 secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment; 9575 } 9576 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach, 9577 secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi); 9578 } 9579 uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED; 9580 if (primary_desc.pDepthStencilAttachment) { 9581 primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment; 9582 } 9583 if (secondary_desc.pDepthStencilAttachment) { 9584 secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment; 9585 } 9586 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach, 9587 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi); 9588 return skip_call; 9589} 9590 9591// Verify that given renderPass CreateInfo 
for primary and secondary command buffers are compatible. 9592// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and 9593// will then feed into this function 9594static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, 9595 VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer, 9596 VkRenderPassCreateInfo const *secondaryPassCI) { 9597 bool skip_call = false; 9598 9599 if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) { 9600 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9601 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", 9602 "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64 9603 " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64 9604 " that has a subpassCount of %u.", 9605 reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount, 9606 reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount); 9607 } else { 9608 for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) { 9609 skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i, 9610 primaryPassCI->subpassCount > 1); 9611 } 9612 } 9613 return skip_call; 9614} 9615 9616static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB, 9617 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) { 9618 bool skip_call = false; 9619 if (!pSubCB->beginInfo.pInheritanceInfo) { 9620 return skip_call; 9621 } 9622 VkFramebuffer primary_fb = pCB->activeFramebuffer; 9623 VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer; 9624 if (secondary_fb != VK_NULL_HANDLE) { 9625 if (primary_fb != secondary_fb) { 9626 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9627 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", 9628 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a framebuffer 0x%" PRIx64 9629 " that is not compatible with the current framebuffer 0x%" PRIx64 ".", 9630 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb)); 9631 } 9632 auto fb = getFramebuffer(dev_data, secondary_fb); 9633 if (!fb) { 9634 skip_call |= 9635 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9636 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p " 9637 "which has invalid framebuffer 0x%" PRIx64 ".", 9638 (void *)secondaryBuffer, (uint64_t)(secondary_fb)); 9639 return skip_call; 9640 } 9641 auto cb_renderpass = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass); 9642 if (cb_renderpass->renderPass != fb->createInfo.renderPass) { 9643 skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer, 9644 cb_renderpass->pCreateInfo); 9645 } 9646 } 9647 return skip_call; 9648} 9649 9650static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) { 9651 bool skip_call = false; 9652 unordered_set<int> activeTypes; 9653 for (auto queryObject : pCB->activeQueries) { 9654 auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool); 9655 if (queryPoolData != 
dev_data->queryPoolMap.end()) {
9656             if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9657                 pSubCB->beginInfo.pInheritanceInfo) {
9658                 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9659                 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9660                     skip_call |= log_msg(
9661                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9662                         DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9663                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9664                         "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried so the command "
9665                         "buffer must have all bits set on the queryPool.",
9666                         reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9667                 }
9668             }
9669             activeTypes.insert(queryPoolData->second.createInfo.queryType);
9670         }
9671     }
9672     for (auto queryObject : pSubCB->startedQueries) {
9673         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9674         if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9675             skip_call |=
9676                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9677                         DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9678                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9679                         "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
9680                         "secondary Cmd Buffer 0x%p.",
9681                         reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9682                         queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9683         }
9684     }
9685     return skip_call;
9686 }
9687
9688 VKAPI_ATTR void VKAPI_CALL
9689 CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9690     bool skip_call = false;
9691     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9692     std::unique_lock<std::mutex> lock(global_lock);
9693     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9694     if (pCB) {
9695         GLOBAL_CB_NODE *pSubCB = NULL;
9696         for (uint32_t i = 0; i < commandBuffersCount; i++) {
9697             pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9698             if (!pSubCB) {
9699                 skip_call |=
9700                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9701                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9702                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
9703                             (void *)pCommandBuffers[i], i);
9704             } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9705                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9706                                      __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9707                                      "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
9708                                      "array.
All cmd buffers in pCommandBuffers array must be secondary.", 9709 (void *)pCommandBuffers[i], i); 9710 } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set 9711 auto secondary_rp_node = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass); 9712 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { 9713 skip_call |= log_msg( 9714 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 9715 (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 9716 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64 9717 ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.", 9718 (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass); 9719 } else { 9720 // Make sure render pass is compatible with parent command buffer pass if has continue 9721 if (pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) { 9722 skip_call |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->pCreateInfo, 9723 pCommandBuffers[i], secondary_rp_node->pCreateInfo); 9724 } 9725 skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB); 9726 } 9727 string errorString = ""; 9728 if ((pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) && 9729 !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->pCreateInfo, secondary_rp_node->pCreateInfo, 9730 errorString)) { 9731 skip_call |= log_msg( 9732 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 9733 (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS", 9734 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 9735 ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s", 9736 (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer, 9737 (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str()); 9738 } 9739 // If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass() 9740 // that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass 9741 if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) { 9742 if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) { 9743 skip_call |= log_msg( 9744 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 9745 (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS", 9746 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) references framebuffer (0x%" PRIxLEAST64 9747 ") that does not match framebuffer (0x%" PRIxLEAST64 ") in active renderpass (0x%" PRIxLEAST64 ").", 9748 (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer, 9749 (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass->renderPass); 9750 } 9751 } 9752 } 9753 // TODO(mlentine): Move more logic into this method 9754 skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB); 9755 skip_call |= validateCommandBufferState(dev_data, pSubCB); 9756 // Secondary cmdBuffers are considered pending execution starting w/ 9757 // being 
recorded 9758 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { 9759 if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) { 9760 skip_call |= log_msg( 9761 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 9762 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS", 9763 "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT " 9764 "set!", 9765 (uint64_t)(pCB->commandBuffer)); 9766 } 9767 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) { 9768 // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous 9769 skip_call |= log_msg( 9770 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 9771 (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS", 9772 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64 9773 ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer " 9774 "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT " 9775 "set, even though it does.", 9776 (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer)); 9777 pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; 9778 } 9779 } 9780 if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) { 9781 skip_call |= 9782 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 9783 reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 9784 "vkCmdExecuteCommands(): Secondary Command Buffer " 9785 "(0x%" PRIxLEAST64 ") cannot be executed while a query is in " 9786 "flight because inherited queries are not " 9787 "supported on this device.", 9788 reinterpret_cast<uint64_t>(pCommandBuffers[i])); 9789 } 9790 pSubCB->primaryCommandBuffer = pCB->commandBuffer; 9791 pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer); 9792 dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer); 9793 } 9794 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands"); 9795 skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()"); 9796 } 9797 lock.unlock(); 9798 if (!skip_call) 9799 dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers); 9800} 9801 9802static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) { 9803 bool skip_call = false; 9804 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9805 auto mem_info = getMemObjInfo(dev_data, mem); 9806 if ((mem_info) && (mem_info->image != VK_NULL_HANDLE)) { 9807 std::vector<VkImageLayout> layouts; 9808 if (FindLayouts(dev_data, mem_info->image, layouts)) { 9809 for (auto layout : layouts) { 9810 if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) { 9811 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9812 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only " 9813 "GENERAL or PREINITIALIZED are supported.", 9814 string_VkImageLayout(layout)); 9815 } 9816 } 9817 } 9818 } 9819 return skip_call; 9820}
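// Illustrative app-side sketch (not part of the layer; handles hypothetical): the checks in
// MapMemory below expect a HOST_VISIBLE allocation and a mapped range inside the allocation:
//
//     void *ptr = nullptr;
//     if (vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr) == VK_SUCCESS) {
//         // ... write through ptr ...
//         vkUnmapMemory(device, mem);
//     }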
9821 9822VKAPI_ATTR VkResult VKAPI_CALL 9823MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) { 9824 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9825 9826 bool skip_call = false; 9827 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 9828 std::unique_lock<std::mutex> lock(global_lock); 9829#if MTMERGESOURCE 9830 DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem); 9831 if (pMemObj) { 9832 pMemObj->valid = true; 9833 if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & 9834 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { 9835 skip_call = 9836 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9837 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM", 9838 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem); 9839 } 9840 } 9841 skip_call |= validateMemRange(dev_data, mem, offset, size); 9842#endif 9843 skip_call |= ValidateMapImageLayouts(device, mem); 9844 lock.unlock(); 9845 9846 if (!skip_call) { 9847 result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData); 9848 if (VK_SUCCESS == result) { 9849#if MTMERGESOURCE 9850 lock.lock(); 9851 storeMemRanges(dev_data, mem, offset, size); 9852 initializeAndTrackMemory(dev_data, mem, size, ppData); 9853 lock.unlock(); 9854#endif 9855 } 9856 } 9857 return result; 9858} 9859 9860VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) { 9861 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9862 bool skip_call = false; 9863 9864 std::unique_lock<std::mutex> lock(global_lock); 9865 skip_call |= deleteMemRanges(my_data, mem); 9866 lock.unlock(); 9867 if (!skip_call) { 9868 my_data->device_dispatch_table->UnmapMemory(device, mem); 9869 } 9870} 9871 9872static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount, 9873 const VkMappedMemoryRange *pMemRanges) { 9874 bool skip_call = false; 9875 for (uint32_t i = 0; i < memRangeCount; ++i) { 9876 auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory); 9877 if (mem_info) { 9878 if (mem_info->memRange.offset > pMemRanges[i].offset) { 9879 skip_call |= 9880 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9881 (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 9882 "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset " 9883 "(" PRINTF_SIZE_T_SPECIFIER ").", 9884 funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->memRange.offset)); 9885 } 9886 9887 const uint64_t mem_range_end = 9888 (mem_info->memRange.size == VK_WHOLE_SIZE) ? 
mem_info->allocInfo.allocationSize : 9889 (mem_info->memRange.offset + mem_info->memRange.size); 9890 if (pMemRanges[i].size != VK_WHOLE_SIZE && (mem_range_end < (pMemRanges[i].offset + pMemRanges[i].size))) { 9891 skip_call |= log_msg( 9892 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9893 (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 9894 "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER ") exceeds the Memory Object's upper-bound " 9895 "(" PRINTF_SIZE_T_SPECIFIER ").", 9896 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), static_cast<size_t>(mem_range_end)); 9897 } 9898 } 9899 } 9900 return skip_call; 9901} 9902 9903static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount, 9904 const VkMappedMemoryRange *pMemRanges) { 9905 bool skip_call = false; 9906 for (uint32_t i = 0; i < memRangeCount; ++i) { 9907 auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory); 9908 if (mem_info) { 9909 if (mem_info->pData) { 9910 VkDeviceSize size = mem_info->memRange.size; 9911 VkDeviceSize half_size = (size / 2); 9912 char *data = static_cast<char *>(mem_info->pData); // app data sits between two half_size guard bands filled with NoncoherentMemoryFillValue 9913 for (VkDeviceSize j = 0; j < half_size; ++j) { // use VkDeviceSize, not int, to match half_size 9914 if (data[j] != NoncoherentMemoryFillValue) { 9915 skip_call |= log_msg( 9916 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9917 (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 9918 "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory); 9919 } 9920 } 9921 for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) { 9922 if (data[j] != NoncoherentMemoryFillValue) { 9923 skip_call |= log_msg( 9924 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9925 (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 9926 "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory); 9927 } 9928 } 9929 memcpy(mem_info->pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size)); 9930 } 9931 } 9932 } 9933 return skip_call; 9934} 9935 9936VkResult VKAPI_CALL 9937FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { 9938 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 9939 bool skip_call = false; 9940 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9941 9942 std::unique_lock<std::mutex> lock(global_lock); 9943 skip_call |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges); 9944 skip_call |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); 9945 lock.unlock(); 9946 if (!skip_call) { 9947 result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges); 9948 } 9949 return result; 9950} 9951 9952VkResult VKAPI_CALL 9953InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { 9954 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 9955 bool skip_call = false; 9956 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9957 9958 std::unique_lock<std::mutex> lock(global_lock); 9959 skip_call |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); 9960 lock.unlock();
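// Illustrative app-side sketch (not part of the layer; handles hypothetical): the range
// checks above expect each VkMappedMemoryRange to fall inside the region passed to
// vkMapMemory, e.g.:
//
//     VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE};
//     range.memory = mem;
//     range.offset = mapOffset;     // >= the offset given to vkMapMemory
//     range.size = VK_WHOLE_SIZE;   // or an explicit size ending within the mapped region
//     vkInvalidateMappedMemoryRanges(device, 1, &range);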
9961 if (!skip_call) { 9962 result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges); 9963 } 9964 return result; 9965} 9966 9967VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) { 9968 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9969 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 9970 bool skip_call = false; 9971 std::unique_lock<std::mutex> lock(global_lock); 9972 auto image_node = getImageNode(dev_data, image); 9973 if (image_node) { 9974 // Track objects tied to memory 9975 uint64_t image_handle = reinterpret_cast<uint64_t &>(image); 9976 skip_call = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory"); 9977 VkMemoryRequirements memRequirements; 9978 lock.unlock(); 9979 dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements); 9980 lock.lock(); 9981 9982 // Track and validate bound memory range information 9983 auto mem_info = getMemObjInfo(dev_data, mem); 9984 if (mem_info) { 9985 const MEMORY_RANGE range = 9986 insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, mem_info->imageRanges); 9987 skip_call |= validate_memory_range(dev_data, mem_info->bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT); 9988 skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory"); 9989 } 9990 9991 print_mem_list(dev_data); 9992 lock.unlock(); 9993 if (!skip_call) { 9994 result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset); 9995 lock.lock(); 9996 dev_data->memObjMap[mem].get()->image = image; 9997 image_node->mem = mem; 9998 image_node->memOffset = memoryOffset; 9999 image_node->memSize = memRequirements.size; 10000 lock.unlock(); 10001 } 10002 } else { 10003 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 10004 reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT", 10005 "vkBindImageMemory: Cannot find image 0x%" PRIx64 "; has it already been deleted?", 10006 reinterpret_cast<const uint64_t &>(image)); 10007 } 10008 return result; 10009} 10010 10011VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) { 10012 bool skip_call = false; 10013 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 10014 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10015 std::unique_lock<std::mutex> lock(global_lock); 10016 auto event_node = dev_data->eventMap.find(event); 10017 if (event_node != dev_data->eventMap.end()) { 10018 event_node->second.needsSignaled = false; 10019 event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT; 10020 if (event_node->second.write_in_use) { 10021 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, 10022 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 10023 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.", 10024 reinterpret_cast<const uint64_t &>(event)); 10025 } 10026 } 10027 lock.unlock(); 10028 // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event 10029 // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the 10030 // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
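// Illustrative host-signal/device-wait pairing that the stageMask update below models
// (sketch only; handles hypothetical). A device-side wait on a host-set event must include
// the HOST stage in its srcStageMask:
//
//     vkSetEvent(device, event); // host-side signal, visible to all queues
//     vkCmdWaitEvents(cb, 1, &event, VK_PIPELINE_STAGE_HOST_BIT,
//                     VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 0, nullptr);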
10031 for (auto queue_data : dev_data->queueMap) { 10032 auto event_entry = queue_data.second.eventToStageMap.find(event); 10033 if (event_entry != queue_data.second.eventToStageMap.end()) { 10034 event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT; 10035 } 10036 } 10037 if (!skip_call) 10038 result = dev_data->device_dispatch_table->SetEvent(device, event); 10039 return result; 10040} 10041 10042VKAPI_ATTR VkResult VKAPI_CALL 10043QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) { 10044 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 10045 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 10046 bool skip_call = false; 10047 std::unique_lock<std::mutex> lock(global_lock); 10048 auto pFence = getFenceNode(dev_data, fence); 10049 auto pQueue = getQueueNode(dev_data, queue); 10050 10051 // First verify that fence is not in use 10052 skip_call |= ValidateFenceForSubmit(dev_data, pFence); 10053 10054 if (fence != VK_NULL_HANDLE) { 10055 SubmitFence(pQueue, pFence); 10056 } 10057 10058 for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) { 10059 const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx]; 10060 // Track objects tied to memory 10061 for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) { 10062 for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) { 10063 if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory, 10064 (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 10065 "vkQueueBindSparse")) 10066 skip_call = true; 10067 } 10068 } 10069 for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) { 10070 for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) { 10071 if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory, 10072 (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 10073 "vkQueueBindSparse")) 10074 skip_call = true; 10075 } 10076 } 10077 for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) { 10078 for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) { 10079 if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory, 10080 (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 10081 "vkQueueBindSparse")) 10082 skip_call = true; 10083 } 10084 } 10085 for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) { 10086 VkSemaphore semaphore = bindInfo.pWaitSemaphores[i]; 10087 auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 10088 if (pSemaphore) { 10089 if (pSemaphore->signaled) { 10090 pSemaphore->signaled = false; 10091 } else { 10092 skip_call |= 10093 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 10094 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 10095 "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 10096 " that has no way to be signaled.", 10097 reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore)); 10098 } 10099 } 10100 } 10101 for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) { 10102 VkSemaphore semaphore = bindInfo.pSignalSemaphores[i]; 10103 auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 10104 if (pSemaphore) { 10105 if (pSemaphore->signaled) { 10106 skip_call |= 10107 log_msg(dev_data->report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 10108 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 10109 "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64 10110 ", but that semaphore is already signaled.", 10111 reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore)); 10112 } 10113 pSemaphore->signaled = true; 10114 } 10115 } 10116 } 10117 print_mem_list(dev_data); 10118 lock.unlock(); 10119 10120 if (!skip_call) 10121 return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence); 10122 10123 return result; 10124} 10125 10126VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, 10127 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) { 10128 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10129 VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore); 10130 if (result == VK_SUCCESS) { 10131 std::lock_guard<std::mutex> lock(global_lock); 10132 SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore]; 10133 sNode->signaled = false; 10134 sNode->queue = VK_NULL_HANDLE; 10135 sNode->in_use.store(0); 10136 } 10137 return result; 10138} 10139 10140VKAPI_ATTR VkResult VKAPI_CALL 10141CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) { 10142 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10143 VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent); 10144 if (result == VK_SUCCESS) { 10145 std::lock_guard<std::mutex> lock(global_lock); 10146 dev_data->eventMap[*pEvent].needsSignaled = false; 10147 dev_data->eventMap[*pEvent].in_use.store(0); 10148 dev_data->eventMap[*pEvent].write_in_use = 0; 10149 dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0); 10150 } 10151 return result; 10152} 10153 10154VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, 10155 const VkAllocationCallbacks *pAllocator, 10156 VkSwapchainKHR *pSwapchain) { 10157 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10158 VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain); 10159 10160 if (VK_SUCCESS == result) { 10161 std::lock_guard<std::mutex> lock(global_lock); 10162 dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo)); 10163 } 10164 10165 return result; 10166} 10167 10168VKAPI_ATTR void VKAPI_CALL 10169DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { 10170 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10171 bool skip_call = false; 10172 10173 std::unique_lock<std::mutex> lock(global_lock); 10174 auto swapchain_data = getSwapchainNode(dev_data, swapchain); 10175 if (swapchain_data) { 10176 if (swapchain_data->images.size() > 0) { 10177 for (auto swapchain_image : swapchain_data->images) { 10178 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image); 10179 if (image_sub != dev_data->imageSubresourceMap.end()) { 10180 for (auto imgsubpair : image_sub->second) { 10181 auto image_item = 
dev_data->imageLayoutMap.find(imgsubpair); 10182 if (image_item != dev_data->imageLayoutMap.end()) { 10183 dev_data->imageLayoutMap.erase(image_item); 10184 } 10185 } 10186 dev_data->imageSubresourceMap.erase(image_sub); 10187 } 10188 skip_call = 10189 clear_object_binding(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT); 10190 dev_data->imageMap.erase(swapchain_image); 10191 } 10192 } 10193 dev_data->device_extensions.swapchainMap.erase(swapchain); 10194 } 10195 lock.unlock(); 10196 if (!skip_call) 10197 dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator); 10198} 10199 10200VKAPI_ATTR VkResult VKAPI_CALL 10201GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) { 10202 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10203 VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages); 10204 10205 if (result == VK_SUCCESS && pSwapchainImages != NULL) { 10206 // This should never happen and is checked by param checker. 10207 if (!pCount) 10208 return result; 10209 std::lock_guard<std::mutex> lock(global_lock); 10210 const size_t count = *pCount; 10211 auto swapchain_node = getSwapchainNode(dev_data, swapchain); 10212 if (swapchain_node && !swapchain_node->images.empty()) { 10213 // TODO : Not sure I like the memcmp here, but it works 10214 const bool mismatch = (swapchain_node->images.size() != count || 10215 memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count)); 10216 if (mismatch) { 10217 // TODO: Verify against Valid Usage section of extension 10218 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, 10219 (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN", 10220 "vkGetSwapchainImagesKHR(0x%" PRIx64 10221 ") returned image data that does not match a previous query of the same swapchain", 10222 (uint64_t)(swapchain)); 10223 } 10224 } 10225 for (uint32_t i = 0; swapchain_node && i < *pCount; ++i) { // guard against an unknown swapchain handle 10226 IMAGE_LAYOUT_NODE image_layout_node; 10227 image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED; 10228 image_layout_node.format = swapchain_node->createInfo.imageFormat; 10229 // Add imageMap entries for each swapchain image 10230 VkImageCreateInfo image_ci = {}; 10231 image_ci.mipLevels = 1; 10232 image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers; 10233 image_ci.usage = swapchain_node->createInfo.imageUsage; 10234 image_ci.format = swapchain_node->createInfo.imageFormat; 10235 image_ci.samples = VK_SAMPLE_COUNT_1_BIT; 10236 image_ci.extent.width = swapchain_node->createInfo.imageExtent.width; 10237 image_ci.extent.height = swapchain_node->createInfo.imageExtent.height; 10238 image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode; 10239 dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pSwapchainImages[i], &image_ci)); 10240 auto &image_node = dev_data->imageMap[pSwapchainImages[i]]; 10241 image_node->valid = false; 10242 image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY; 10243 swapchain_node->images.push_back(pSwapchainImages[i]); 10244 ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()}; 10245 dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair); 10246 dev_data->imageLayoutMap[subpair] = image_layout_node; 10247 dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain; 
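// Note: swapchain images are never bound to app-allocated VkDeviceMemory, so the sentinel
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY assigned above lets the memory-tracking checks recognize
// these images and bypass normal binding validation.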
10248 } 10249 } 10250 return result; 10251} 10252 10253VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { 10254 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 10255 bool skip_call = false; 10256 10257 std::lock_guard<std::mutex> lock(global_lock); 10258 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) { 10259 auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]); 10260 if (pSemaphore && !pSemaphore->signaled) { 10261 skip_call |= 10262 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 10263 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 10264 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", 10265 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i])); 10266 } 10267 } 10268 10269 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) { 10270 auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]); 10271 if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) { 10272 VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]]; 10273 skip_call |= validate_memory_is_valid(dev_data, getImageNode(dev_data, image)->mem, "vkQueuePresentKHR()", image); 10274 vector<VkImageLayout> layouts; 10275 if (FindLayouts(dev_data, image, layouts)) { 10276 for (auto layout : layouts) { 10277 if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) { 10278 skip_call |= 10279 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, 10280 reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 10281 "Images passed to present must be in layout " 10282 "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, but this image is in %s.", 10283 string_VkImageLayout(layout)); 10284 } 10285 } 10286 } 10287 } 10288 } 10289 10290 if (skip_call) { 10291 return VK_ERROR_VALIDATION_FAILED_EXT; 10292 } 10293 10294 VkResult result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo); 10295 10296 if (result != VK_ERROR_VALIDATION_FAILED_EXT) { 10297 // Semaphore waits occur before error generation, if the call reached 10298 // the ICD. (Confirm?) 
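// Illustrative acquire/submit/present chain assumed by the semaphore checks above (sketch
// only; handles hypothetical):
//
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquireSem, VK_NULL_HANDLE, &index);
//     // ... submit work that waits on acquireSem and signals renderSem ...
//     VkPresentInfoKHR present = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &renderSem;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &index;
//     vkQueuePresentKHR(queue, &present);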
10299 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) { 10300 auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]); 10301 if (pSemaphore && pSemaphore->signaled) { 10302 pSemaphore->signaled = false; 10303 } 10304 } 10305 } 10306 10307 return result; 10308} 10309 10310VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, 10311 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) { 10312 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10313 bool skip_call = false; 10314 10315 std::unique_lock<std::mutex> lock(global_lock); 10316 auto pSemaphore = getSemaphoreNode(dev_data, semaphore); 10317 if (pSemaphore && pSemaphore->signaled) { 10318 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 10319 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 10320 "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state"); 10321 } 10322 10323 auto pFence = getFenceNode(dev_data, fence); 10324 if (pFence) { 10325 skip_call |= ValidateFenceForSubmit(dev_data, pFence); 10326 } 10327 lock.unlock(); 10328 10329 if (skip_call) 10330 return VK_ERROR_VALIDATION_FAILED_EXT; 10331 10332 VkResult result = 10333 dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex); 10334 10335 lock.lock(); 10336 if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) { 10337 if (pFence) { 10338 pFence->state = FENCE_INFLIGHT; 10339 } 10340 10341 // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore 10342 if (pSemaphore) { 10343 pSemaphore->signaled = true; 10344 } 10345 } 10346 lock.unlock(); 10347 10348 return result; 10349} 10350 10351VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, 10352 VkPhysicalDevice *pPhysicalDevices) { 10353 bool skip_call = false; 10354 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 10355 if (my_data->instance_state) { 10356 // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS 10357 if (NULL == pPhysicalDevices) { 10358 my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_COUNT; 10359 } else { 10360 if (UNCALLED == my_data->instance_state->vkEnumeratePhysicalDevicesState) { 10361 // Flag warning here. You can call this without having queried the count, but it may not be 10362 // robust on platforms with multiple physical devices. 10363 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 10364 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL", 10365 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. 
You should first " 10366 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount."); 10367 } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state 10368 else if (my_data->instance_state->physical_devices_count != *pPhysicalDeviceCount) { 10369 // Having actual count match count from app is not a requirement, so this can be a warning 10370 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 10371 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL", 10372 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count " 10373 "supported by this instance is %u.", 10374 *pPhysicalDeviceCount, my_data->instance_state->physical_devices_count); 10375 } 10376 my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_DETAILS; 10377 } 10378 if (skip_call) { 10379 return VK_ERROR_VALIDATION_FAILED_EXT; 10380 } 10381 VkResult result = 10382 my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices); 10383 if (NULL == pPhysicalDevices) { 10384 my_data->instance_state->physical_devices_count = *pPhysicalDeviceCount; 10385 } else { // Save physical devices 10386 for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) { 10387 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(pPhysicalDevices[i]), layer_data_map); 10388 phy_dev_data->physical_device_state = unique_ptr<PHYSICAL_DEVICE_STATE>(new PHYSICAL_DEVICE_STATE()); 10389 // Init actual features for each physical device 10390 my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i], 10391 &phy_dev_data->physical_device_features); 10392 } 10393 } 10394 return result; 10395 } else { 10396 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, 10397 DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().", 10398 (uint64_t)instance); 10399 } 10400 return VK_ERROR_VALIDATION_FAILED_EXT; 10401} 10402 10403VKAPI_ATTR void VKAPI_CALL 10404GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, 10405 VkQueueFamilyProperties *pQueueFamilyProperties) { 10406 bool skip_call = false; 10407 layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map); 10408 if (phy_dev_data->physical_device_state) { 10409 if (NULL == pQueueFamilyProperties) { 10410 phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT; 10411 } 10412 else { 10413 // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to 10414 // get count 10415 if (UNCALLED == phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) { 10416 skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 10417 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL", 10418 "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL " 10419 "pQueueFamilyProperties. 
You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ " 10420 "NULL pQueueFamilyProperties to query pCount."); 10421 } 10422 // Then verify that pCount that is passed in on second call matches what was returned 10423 if (phy_dev_data->physical_device_state->queueFamilyPropertiesCount != *pCount) { 10424 10425 // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so 10426 // provide as warning 10427 skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 10428 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL", 10429 "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count " 10430 "supported by this physicalDevice is %u.", 10431 *pCount, phy_dev_data->physical_device_state->queueFamilyPropertiesCount); 10432 } 10433 phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS; 10434 } 10435 if (skip_call) { 10436 return; 10437 } 10438 phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, 10439 pQueueFamilyProperties); 10440 if (NULL == pQueueFamilyProperties) { 10441 phy_dev_data->physical_device_state->queueFamilyPropertiesCount = *pCount; 10442 } 10443 else { // Save queue family properties 10444 phy_dev_data->queue_family_properties.reserve(*pCount); 10445 for (uint32_t i = 0; i < *pCount; i++) { 10446 phy_dev_data->queue_family_properties.emplace_back(new VkQueueFamilyProperties(pQueueFamilyProperties[i])); 10447 } 10448 } 10449 return; 10450 } 10451 else { 10452 log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, 10453 __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL", 10454 "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().", 10455 (uint64_t)physicalDevice); 10456 } 10457} 10458 10459VKAPI_ATTR VkResult VKAPI_CALL 10460CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, 10461 const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) { 10462 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 10463 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 10464 VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); 10465 if (VK_SUCCESS == res) { 10466 std::lock_guard<std::mutex> lock(global_lock); 10467 res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback); 10468 } 10469 return res; 10470} 10471 10472VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, 10473 VkDebugReportCallbackEXT msgCallback, 10474 const VkAllocationCallbacks *pAllocator) { 10475 layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 10476 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 10477 pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator); 10478 std::lock_guard<std::mutex> lock(global_lock); 10479 layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator); 10480} 10481 10482VKAPI_ATTR void VKAPI_CALL 10483DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object, 10484 size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { 10485 
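// This entry point simply forwards app-generated messages down the instance dispatch chain.
// Illustrative app-side call (sketch only; strings hypothetical):
//
//     vkDebugReportMessageEXT(instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
//                             VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 0, 0,
//                             "APP", "frame begin");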
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 10486 my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, 10487 pMsg); 10488} 10489 10490VKAPI_ATTR VkResult VKAPI_CALL 10491EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) { 10492 return util_GetLayerProperties(1, &global_layer, pCount, pProperties); 10493} 10494 10495VKAPI_ATTR VkResult VKAPI_CALL 10496EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) { 10497 return util_GetLayerProperties(1, &global_layer, pCount, pProperties); 10498} 10499 10500VKAPI_ATTR VkResult VKAPI_CALL 10501EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) { 10502 if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) 10503 return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties); 10504 10505 return VK_ERROR_LAYER_NOT_PRESENT; 10506} 10507 10508VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, 10509 const char *pLayerName, uint32_t *pCount, 10510 VkExtensionProperties *pProperties) { 10511 if (pLayerName && !strcmp(pLayerName, global_layer.layerName)) 10512 return util_GetExtensionProperties(0, NULL, pCount, pProperties); 10513 10514 assert(physicalDevice); 10515 10516 dispatch_key key = get_dispatch_key(physicalDevice); 10517 layer_data *my_data = get_my_data_ptr(key, layer_data_map); 10518 return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties); 10519} 10520 10521static PFN_vkVoidFunction 10522intercept_core_instance_command(const char *name); 10523 10524static PFN_vkVoidFunction 10525intercept_core_device_command(const char *name); 10526 10527static PFN_vkVoidFunction 10528intercept_khr_swapchain_command(const char *name, VkDevice dev); 10529 10530VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) { 10531 PFN_vkVoidFunction proc = intercept_core_device_command(funcName); 10532 if (proc) 10533 return proc; 10534 10535 assert(dev); 10536 10537 proc = intercept_khr_swapchain_command(funcName, dev); 10538 if (proc) 10539 return proc; 10540 10541 layer_data *dev_data; 10542 dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map); 10543 10544 VkLayerDispatchTable *pTable = dev_data->device_dispatch_table; 10545 { 10546 if (pTable->GetDeviceProcAddr == NULL) 10547 return NULL; 10548 return pTable->GetDeviceProcAddr(dev, funcName); 10549 } 10550} 10551 10552VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) { 10553 PFN_vkVoidFunction proc = intercept_core_instance_command(funcName); 10554 if (!proc) 10555 proc = intercept_core_device_command(funcName); 10556 if (!proc) 10557 proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE); 10558 if (proc) 10559 return proc; 10560 10561 assert(instance); 10562 10563 layer_data *my_data; 10564 my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 10565 proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName); 10566 if (proc) 10567 return proc; 10568 10569 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 10570 if (pTable->GetInstanceProcAddr == NULL) 10571 return NULL; 10572 return pTable->GetInstanceProcAddr(instance, funcName); 10573} 10574 10575static 
PFN_vkVoidFunction 10576intercept_core_instance_command(const char *name) { 10577 static const struct { 10578 const char *name; 10579 PFN_vkVoidFunction proc; 10580 } core_instance_commands[] = { 10581 { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) }, 10582 { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) }, 10583 { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) }, 10584 { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) }, 10585 { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) }, 10586 { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) }, 10587 { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) }, 10588 { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) }, 10589 { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) }, 10590 { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) }, 10591 { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) }, 10592 }; 10593 10594 for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) { 10595 if (!strcmp(core_instance_commands[i].name, name)) 10596 return core_instance_commands[i].proc; 10597 } 10598 10599 return nullptr; 10600} 10601 10602static PFN_vkVoidFunction 10603intercept_core_device_command(const char *name) { 10604 static const struct { 10605 const char *name; 10606 PFN_vkVoidFunction proc; 10607 } core_device_commands[] = { 10608 {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)}, 10609 {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)}, 10610 {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)}, 10611 {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)}, 10612 {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)}, 10613 {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)}, 10614 {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)}, 10615 {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)}, 10616 {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)}, 10617 {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)}, 10618 {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)}, 10619 {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)}, 10620 {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)}, 10621 {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)}, 10622 {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)}, 10623 {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)}, 10624 {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)}, 10625 {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)}, 10626 {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)}, 10627 {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)}, 10628 {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)}, 10629 
{"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)}, 10630 {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)}, 10631 {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)}, 10632 {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)}, 10633 {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)}, 10634 {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)}, 10635 {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)}, 10636 {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)}, 10637 {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)}, 10638 {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)}, 10639 {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)}, 10640 {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)}, 10641 {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)}, 10642 {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)}, 10643 {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)}, 10644 {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)}, 10645 {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)}, 10646 {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)}, 10647 {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)}, 10648 {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)}, 10649 {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)}, 10650 {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)}, 10651 {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)}, 10652 {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)}, 10653 {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)}, 10654 {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)}, 10655 {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)}, 10656 {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)}, 10657 {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)}, 10658 {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)}, 10659 {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)}, 10660 {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)}, 10661 {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)}, 10662 {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)}, 10663 {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)}, 10664 {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)}, 10665 {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)}, 10666 {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)}, 10667 {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)}, 10668 {"vkCmdSetDepthBounds", 
reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)}, 10669 {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)}, 10670 {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)}, 10671 {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)}, 10672 {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)}, 10673 {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)}, 10674 {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)}, 10675 {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)}, 10676 {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)}, 10677 {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)}, 10678 {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)}, 10679 {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)}, 10680 {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)}, 10681 {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)}, 10682 {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)}, 10683 {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)}, 10684 {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)}, 10685 {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)}, 10686 {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)}, 10687 {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)}, 10688 {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)}, 10689 {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)}, 10690 {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)}, 10691 {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)}, 10692 {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)}, 10693 {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)}, 10694 {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)}, 10695 {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)}, 10696 {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)}, 10697 {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)}, 10698 {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)}, 10699 {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)}, 10700 {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)}, 10701 {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)}, 10702 {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)}, 10703 {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)}, 10704 {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)}, 10705 {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)}, 10706 {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)}, 10707 {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)}, 10708 {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)}, 
10709 {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)}, 10710 {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)}, 10711 {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)}, 10712 {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)}, 10713 {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)}, 10714 {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)}, 10715 {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)}, 10716 {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)}, 10717 {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)}, 10718 {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)}, 10719 {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)}, 10720 {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)}, 10721 {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)}, 10722 {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)}, 10723 {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)}, 10724 }; 10725 10726 for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) { 10727 if (!strcmp(core_device_commands[i].name, name)) 10728 return core_device_commands[i].proc; 10729 } 10730 10731 return nullptr; 10732} 10733 10734static PFN_vkVoidFunction 10735intercept_khr_swapchain_command(const char *name, VkDevice dev) { 10736 static const struct { 10737 const char *name; 10738 PFN_vkVoidFunction proc; 10739 } khr_swapchain_commands[] = { 10740 { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) }, 10741 { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) }, 10742 { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) }, 10743 { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) }, 10744 { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) }, 10745 }; 10746 10747 if (dev) { 10748 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map); 10749 if (!dev_data->device_extensions.wsi_enabled) 10750 return nullptr; 10751 } 10752 10753 for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) { 10754 if (!strcmp(khr_swapchain_commands[i].name, name)) 10755 return khr_swapchain_commands[i].proc; 10756 } 10757 10758 return nullptr; 10759} 10760 10761} // namespace core_validation 10762 10763// vk_layer_logging.h expects these to be defined 10764 10765VKAPI_ATTR VkResult VKAPI_CALL 10766vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, 10767 const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) { 10768 return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); 10769} 10770 10771VKAPI_ATTR void VKAPI_CALL 10772vkDestroyDebugReportCallbackEXT(VkInstance instance, 10773 VkDebugReportCallbackEXT msgCallback, 10774 const VkAllocationCallbacks *pAllocator) { 10775 core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator); 10776} 10777 10778VKAPI_ATTR void VKAPI_CALL 10779vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t 
object, 10780 size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { 10781 core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg); 10782} 10783 10784// loader-layer interface v0, just wrappers since there is only a layer 10785 10786VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 10787vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) { 10788 return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties); 10789} 10790 10791VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 10792vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) { 10793 return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties); 10794} 10795 10796VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 10797vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) { 10798 // the layer command handles VK_NULL_HANDLE just fine internally 10799 assert(physicalDevice == VK_NULL_HANDLE); 10800 return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties); 10801} 10802 10803VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, 10804 const char *pLayerName, uint32_t *pCount, 10805 VkExtensionProperties *pProperties) { 10806 // the layer command handles VK_NULL_HANDLE just fine internally 10807 assert(physicalDevice == VK_NULL_HANDLE); 10808 return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties); 10809} 10810 10811VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) { 10812 return core_validation::GetDeviceProcAddr(dev, funcName); 10813} 10814 10815VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) { 10816 return core_validation::GetInstanceProcAddr(instance, funcName); 10817} 10818
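// Usage note (not part of the file): the loader resolves the exported vkGetInstanceProcAddr /
// vkGetDeviceProcAddr above when this layer is enabled at instance creation. Illustrative
// sketch (layer name assumed; commonly "VK_LAYER_LUNARG_core_validation" for this layer):
//
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation"};
//     VkInstanceCreateInfo ici = {VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO};
//     ici.enabledLayerCount = 1;
//     ici.ppEnabledLayerNames = layers;
//     vkCreateInstance(&ici, nullptr, &instance);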