core_validation.cpp revision a265a8ed3bbb32ade305b1be3148d6001a870b76
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues; // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}
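// Illustrative (hypothetical application code, not part of this layer): an
// ordering that satisfies the check above lists this layer before
// VK_LAYER_GOOGLE_unique_objects:
//
//   const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
//   VkInstanceCreateInfo ci = {};      // 'ci' is a hypothetical name
//   ci.enabledLayerCount = 2;
//   ci.ppEnabledLayerNames = layers;   // ValidateLayerOrdering(ci) logs nothing for this order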
// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
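// Illustrative note on the encoding the iterator above relies on (per the
// SPIR-V spec): the first word of every instruction packs the word count in
// the high 16 bits and the opcode in the low 16 bits. For example,
// "%6 = OpTypeInt 32 0" encodes as four words:
//
//   0x00040015   // len() = 4, opcode() = 0x15 (OpTypeInt)
//   6            // word(1): result id
//   32           // word(2): bit width
//   0            // word(3): signedness (0 = unsigned)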
struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
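// Illustrative usage (hypothetical caller): begin()/end() walk whole
// instructions, skipping the 5-word module header, and get_def() jumps to the
// instruction that defined an id:
//
//   for (auto insn : *module) {                         // 'module' is a shader_module const *
//       if (insn.opcode() == spv::OpVariable) {
//           auto type = module->get_def(insn.word(1));  // definition of the result-type id
//       }
//   }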
// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageMap.find(VkImage(handle));
        if (it != my_data->imageMap.end())
            return &(*it).second.mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferMap.find(VkBuffer(handle));
        if (it != my_data->bufferMap.end())
            return &(*it).second.mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
// Helper function to validate correct usage bits set for buffers or images
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                           " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                       char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        skipCall = validate_usage_flags(dev_data, image_node->second.createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                        char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const buffer_node = dev_data->bufferMap.find(buffer);
    if (buffer_node != dev_data->bufferMap.end()) {
        skipCall = validate_usage_flags(dev_data, buffer_node->second.createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}
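// Illustrative call (a sketch of how later entry points use these helpers):
// vkCmdCopyBuffer's source buffer must have been created with TRANSFER_SRC
// usage, so strict=true demands all desired bits, not just any overlap:
//
//   skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
//                                           true, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");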
// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO: Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end() && !image_node->second.valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end()) {
            image_node->second.valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}
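// Illustrative lifecycle of the 'valid' bit tracked above (hypothetical
// sequence; the real transitions are made by entry points later in this
// file): memory starts invalid at allocation, becomes valid once written,
// and reads are checked against it:
//
//   set_memory_valid(dev_data, mem, false);                         // on vkAllocateMemory
//   set_memory_valid(dev_data, mem, true);                          // after a write, e.g. vkCmdFillBuffer
//   validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()");   // read side logs if still invalid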
// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if ((pMemObjInfo->commandBufferBindings.size()) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->commandBufferBindings.size() > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->objBindings.size() > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            // TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs 2 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applied to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                            ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, *pMemBinding);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    pMemInfo->objBindings.insert({handle, type});
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        auto const image_node = dev_data->imageMap.find(VkImage(handle));
                        if (image_node != dev_data->imageMap.end()) {
                            VkImageCreateInfo ici = image_node->second.createInfo;
                            if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                                // TODO:: More memory state transition stuff.
                            }
                        }
                    }
                    *pMemBinding = mem;
                }
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
// IF a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Return true if we should skip the call (an error was logged), false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list", apiName, handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
            if (pInfo) {
                pInfo->objBindings.insert({handle, type});
                // Need to set mem binding for this object
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}
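// Illustrative call (a sketch; the actual call sites appear in the entry
// points further down): vkBindImageMemory validation routes through
// set_mem_binding with the raw image handle and its debug-report type:
//
//   skipCall |= set_mem_binding(dev_data, mem, (uint64_t)image,
//                               VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");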
// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                        const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->objBindings.size());
        if (pInfo->objBindings.size() > 0) {
            for (auto obj : pInfo->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->commandBufferBindings.size());
        if (pInfo->commandBufferBindings.size() > 0) {
            for (auto cb : pInfo->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)obj);
        }
    }
}

#endif
"CMD_BINDPIPELINE"; 748 case CMD_BINDPIPELINEDELTA: 749 return "CMD_BINDPIPELINEDELTA"; 750 case CMD_SETVIEWPORTSTATE: 751 return "CMD_SETVIEWPORTSTATE"; 752 case CMD_SETLINEWIDTHSTATE: 753 return "CMD_SETLINEWIDTHSTATE"; 754 case CMD_SETDEPTHBIASSTATE: 755 return "CMD_SETDEPTHBIASSTATE"; 756 case CMD_SETBLENDSTATE: 757 return "CMD_SETBLENDSTATE"; 758 case CMD_SETDEPTHBOUNDSSTATE: 759 return "CMD_SETDEPTHBOUNDSSTATE"; 760 case CMD_SETSTENCILREADMASKSTATE: 761 return "CMD_SETSTENCILREADMASKSTATE"; 762 case CMD_SETSTENCILWRITEMASKSTATE: 763 return "CMD_SETSTENCILWRITEMASKSTATE"; 764 case CMD_SETSTENCILREFERENCESTATE: 765 return "CMD_SETSTENCILREFERENCESTATE"; 766 case CMD_BINDDESCRIPTORSETS: 767 return "CMD_BINDDESCRIPTORSETS"; 768 case CMD_BINDINDEXBUFFER: 769 return "CMD_BINDINDEXBUFFER"; 770 case CMD_BINDVERTEXBUFFER: 771 return "CMD_BINDVERTEXBUFFER"; 772 case CMD_DRAW: 773 return "CMD_DRAW"; 774 case CMD_DRAWINDEXED: 775 return "CMD_DRAWINDEXED"; 776 case CMD_DRAWINDIRECT: 777 return "CMD_DRAWINDIRECT"; 778 case CMD_DRAWINDEXEDINDIRECT: 779 return "CMD_DRAWINDEXEDINDIRECT"; 780 case CMD_DISPATCH: 781 return "CMD_DISPATCH"; 782 case CMD_DISPATCHINDIRECT: 783 return "CMD_DISPATCHINDIRECT"; 784 case CMD_COPYBUFFER: 785 return "CMD_COPYBUFFER"; 786 case CMD_COPYIMAGE: 787 return "CMD_COPYIMAGE"; 788 case CMD_BLITIMAGE: 789 return "CMD_BLITIMAGE"; 790 case CMD_COPYBUFFERTOIMAGE: 791 return "CMD_COPYBUFFERTOIMAGE"; 792 case CMD_COPYIMAGETOBUFFER: 793 return "CMD_COPYIMAGETOBUFFER"; 794 case CMD_CLONEIMAGEDATA: 795 return "CMD_CLONEIMAGEDATA"; 796 case CMD_UPDATEBUFFER: 797 return "CMD_UPDATEBUFFER"; 798 case CMD_FILLBUFFER: 799 return "CMD_FILLBUFFER"; 800 case CMD_CLEARCOLORIMAGE: 801 return "CMD_CLEARCOLORIMAGE"; 802 case CMD_CLEARATTACHMENTS: 803 return "CMD_CLEARCOLORATTACHMENT"; 804 case CMD_CLEARDEPTHSTENCILIMAGE: 805 return "CMD_CLEARDEPTHSTENCILIMAGE"; 806 case CMD_RESOLVEIMAGE: 807 return "CMD_RESOLVEIMAGE"; 808 case CMD_SETEVENT: 809 return "CMD_SETEVENT"; 810 case CMD_RESETEVENT: 811 return "CMD_RESETEVENT"; 812 case CMD_WAITEVENTS: 813 return "CMD_WAITEVENTS"; 814 case CMD_PIPELINEBARRIER: 815 return "CMD_PIPELINEBARRIER"; 816 case CMD_BEGINQUERY: 817 return "CMD_BEGINQUERY"; 818 case CMD_ENDQUERY: 819 return "CMD_ENDQUERY"; 820 case CMD_RESETQUERYPOOL: 821 return "CMD_RESETQUERYPOOL"; 822 case CMD_COPYQUERYPOOLRESULTS: 823 return "CMD_COPYQUERYPOOLRESULTS"; 824 case CMD_WRITETIMESTAMP: 825 return "CMD_WRITETIMESTAMP"; 826 case CMD_INITATOMICCOUNTERS: 827 return "CMD_INITATOMICCOUNTERS"; 828 case CMD_LOADATOMICCOUNTERS: 829 return "CMD_LOADATOMICCOUNTERS"; 830 case CMD_SAVEATOMICCOUNTERS: 831 return "CMD_SAVEATOMICCOUNTERS"; 832 case CMD_BEGINRENDERPASS: 833 return "CMD_BEGINRENDERPASS"; 834 case CMD_ENDRENDERPASS: 835 return "CMD_ENDRENDERPASS"; 836 default: 837 return "UNKNOWN"; 838 } 839} 840 841// SPIRV utility functions 842static void build_def_index(shader_module *module) { 843 for (auto insn : *module) { 844 switch (insn.opcode()) { 845 /* Types */ 846 case spv::OpTypeVoid: 847 case spv::OpTypeBool: 848 case spv::OpTypeInt: 849 case spv::OpTypeFloat: 850 case spv::OpTypeVector: 851 case spv::OpTypeMatrix: 852 case spv::OpTypeImage: 853 case spv::OpTypeSampler: 854 case spv::OpTypeSampledImage: 855 case spv::OpTypeArray: 856 case spv::OpTypeRuntimeArray: 857 case spv::OpTypeStruct: 858 case spv::OpTypeOpaque: 859 case spv::OpTypePointer: 860 case spv::OpTypeFunction: 861 case spv::OpTypeEvent: 862 case spv::OpTypeDeviceEvent: 863 case spv::OpTypeReserveId: 864 case 
static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t *words = (uint32_t *)pCreateInfo->pCode;
    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);

    /* Just validate that the header makes sense. */
    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
           considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}
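// Illustrative word layout for the get_constant_value() lookup above:
// "%7 = OpConstant %6 16" is four words, so word(3) holds the literal:
//
//   0x0004002b   // len() = 4, opcode() = 0x2b (OpConstant)
//   6            // word(1): result type id
//   7            // word(2): result id
//   16           // word(3): literal value returned by get_constant_value()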
static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}


static bool is_narrow_numeric_type(spirv_inst_iter type)
{
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}
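// Illustrative output (hypothetical type ids): describe_type flattens a type
// tree into the readable strings used by the mismatch messages below, e.g.:
//
//   describe_type(src, id_of_vec4_f32)  ->  "vec4 of float32"
//   describe_type(src, id_of_out_ptr)   ->  "ptr to output vec4 of float32"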
static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}
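// Illustrative consequence of the relaxed rule above (hypothetical ids): a
// producer may write a wider vector than the consumer reads, but not the
// reverse:
//
//   types_match(p, c, id_vec4_f32, id_vec2_f32, false, false, true)   // true: 4 >= 2 components
//   types_match(p, c, id_vec2_f32, id_vec4_f32, false, false, true)   // false: output too narrow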
static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};
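// Illustrative counts from get_locations_consumed_by_type (hypothetical
// types): a location is 128 bits wide, so 3- and 4-component 64-bit vectors
// spill into a second location, and a matrix consumes its column count times
// the locations of one column:
//
//   float, vec4             -> 1 location
//   dvec3, dvec4            -> 2 locations
//   mat4 (4 vec4 columns)   -> 4 locations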
static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {

        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
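// Illustrative decorations consumed by the two passes above (assumed SPIR-V
// for a user-defined block such as "out VertexData { vec4 color; }"):
//
//   OpDecorate %VertexData Block                // marks the struct as an interface block
//   OpMemberDecorate %VertexData 0 Location 0   // second pass: member 0 -> location 0
//   OpMemberDecorate %VertexData 0 Component 0  // first pass: component within that location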
static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
                                          bool is_array_of_verts) {
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;

    for (auto insn : *src) {

        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
       terminator, to fill out the rest of the word - so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
            bool is_patch = var_patch.find(id) != var_patch.end();

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupied multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = false;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }
}
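// Illustrative OpEntryPoint layout that the name-scanning loop above steps
// over (assumed SPIR-V):
//
//   OpEntryPoint Fragment %main "main" %in_color %out_color
//
// word(1) is the execution model, word(2) the function id, word(3) onward the
// nul-padded name string, and the remaining words are the interface variable
// ids that the loop resolves with get_def().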
!= inputs.end())) { 1448 bool a_at_end = outputs.size() == 0 || a_it == outputs.end(); 1449 bool b_at_end = inputs.size() == 0 || b_it == inputs.end(); 1450 auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first; 1451 auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first; 1452 1453 if (b_at_end || ((!a_at_end) && (a_first < b_first))) { 1454 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1455 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1456 "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first, 1457 a_first.second, consumer_stage->name)) { 1458 pass = false; 1459 } 1460 a_it++; 1461 } else if (a_at_end || a_first > b_first) { 1462 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1463 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", 1464 "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second, 1465 producer_stage->name)) { 1466 pass = false; 1467 } 1468 b_it++; 1469 } else { 1470 // subtleties of arrayed interfaces: 1471 // - if is_patch, then the member is not arrayed, even though the interface may be. 1472 // - if is_block_member, then the extra array level of an arrayed interface is not 1473 // expressed in the member type -- it's expressed in the block type. 1474 if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, 1475 producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member, 1476 consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, 1477 true)) { 1478 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1479 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'", 1480 a_first.first, a_first.second, 1481 describe_type(producer, a_it->second.type_id).c_str(), 1482 describe_type(consumer, b_it->second.type_id).c_str())) { 1483 pass = false; 1484 } 1485 } 1486 if (a_it->second.is_patch != b_it->second.is_patch) { 1487 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0, 1488 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1489 "Decoration mismatch on location %u.%u: is per-%s in %s stage but " 1490 "per-%s in %s stage", a_first.first, a_first.second, 1491 a_it->second.is_patch ? "patch" : "vertex", producer_stage->name, 1492 b_it->second.is_patch ? 
"patch" : "vertex", consumer_stage->name)) { 1493 pass = false; 1494 } 1495 } 1496 a_it++; 1497 b_it++; 1498 } 1499 } 1500 1501 return pass; 1502} 1503 1504enum FORMAT_TYPE { 1505 FORMAT_TYPE_UNDEFINED, 1506 FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */ 1507 FORMAT_TYPE_SINT, 1508 FORMAT_TYPE_UINT, 1509}; 1510 1511static unsigned get_format_type(VkFormat fmt) { 1512 switch (fmt) { 1513 case VK_FORMAT_UNDEFINED: 1514 return FORMAT_TYPE_UNDEFINED; 1515 case VK_FORMAT_R8_SINT: 1516 case VK_FORMAT_R8G8_SINT: 1517 case VK_FORMAT_R8G8B8_SINT: 1518 case VK_FORMAT_R8G8B8A8_SINT: 1519 case VK_FORMAT_R16_SINT: 1520 case VK_FORMAT_R16G16_SINT: 1521 case VK_FORMAT_R16G16B16_SINT: 1522 case VK_FORMAT_R16G16B16A16_SINT: 1523 case VK_FORMAT_R32_SINT: 1524 case VK_FORMAT_R32G32_SINT: 1525 case VK_FORMAT_R32G32B32_SINT: 1526 case VK_FORMAT_R32G32B32A32_SINT: 1527 case VK_FORMAT_R64_SINT: 1528 case VK_FORMAT_R64G64_SINT: 1529 case VK_FORMAT_R64G64B64_SINT: 1530 case VK_FORMAT_R64G64B64A64_SINT: 1531 case VK_FORMAT_B8G8R8_SINT: 1532 case VK_FORMAT_B8G8R8A8_SINT: 1533 case VK_FORMAT_A8B8G8R8_SINT_PACK32: 1534 case VK_FORMAT_A2B10G10R10_SINT_PACK32: 1535 case VK_FORMAT_A2R10G10B10_SINT_PACK32: 1536 return FORMAT_TYPE_SINT; 1537 case VK_FORMAT_R8_UINT: 1538 case VK_FORMAT_R8G8_UINT: 1539 case VK_FORMAT_R8G8B8_UINT: 1540 case VK_FORMAT_R8G8B8A8_UINT: 1541 case VK_FORMAT_R16_UINT: 1542 case VK_FORMAT_R16G16_UINT: 1543 case VK_FORMAT_R16G16B16_UINT: 1544 case VK_FORMAT_R16G16B16A16_UINT: 1545 case VK_FORMAT_R32_UINT: 1546 case VK_FORMAT_R32G32_UINT: 1547 case VK_FORMAT_R32G32B32_UINT: 1548 case VK_FORMAT_R32G32B32A32_UINT: 1549 case VK_FORMAT_R64_UINT: 1550 case VK_FORMAT_R64G64_UINT: 1551 case VK_FORMAT_R64G64B64_UINT: 1552 case VK_FORMAT_R64G64B64A64_UINT: 1553 case VK_FORMAT_B8G8R8_UINT: 1554 case VK_FORMAT_B8G8R8A8_UINT: 1555 case VK_FORMAT_A8B8G8R8_UINT_PACK32: 1556 case VK_FORMAT_A2B10G10R10_UINT_PACK32: 1557 case VK_FORMAT_A2R10G10B10_UINT_PACK32: 1558 return FORMAT_TYPE_UINT; 1559 default: 1560 return FORMAT_TYPE_FLOAT; 1561 } 1562} 1563 1564/* characterizes a SPIR-V type appearing in an interface to a FF stage, 1565 * for comparison to a VkFormat's characterization above. */ 1566static unsigned get_fundamental_type(shader_module const *src, unsigned type) { 1567 auto insn = src->get_def(type); 1568 assert(insn != src->end()); 1569 1570 switch (insn.opcode()) { 1571 case spv::OpTypeInt: 1572 return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT; 1573 case spv::OpTypeFloat: 1574 return FORMAT_TYPE_FLOAT; 1575 case spv::OpTypeVector: 1576 return get_fundamental_type(src, insn.word(2)); 1577 case spv::OpTypeMatrix: 1578 return get_fundamental_type(src, insn.word(2)); 1579 case spv::OpTypeArray: 1580 return get_fundamental_type(src, insn.word(2)); 1581 case spv::OpTypePointer: 1582 return get_fundamental_type(src, insn.word(3)); 1583 default: 1584 return FORMAT_TYPE_UNDEFINED; 1585 } 1586} 1587 1588static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) { 1589 uint32_t bit_pos = u_ffs(stage); 1590 return bit_pos - 1; 1591} 1592 1593static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) { 1594 /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer. 1595 * each binding should be specified only once. 
1596 */ 1597 std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings; 1598 bool pass = true; 1599 1600 for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) { 1601 auto desc = &vi->pVertexBindingDescriptions[i]; 1602 auto &binding = bindings[desc->binding]; 1603 if (binding) { 1604 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1605 __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC", 1606 "Duplicate vertex input binding descriptions for binding %d", desc->binding)) { 1607 pass = false; 1608 } 1609 } else { 1610 binding = desc; 1611 } 1612 } 1613 1614 return pass; 1615} 1616 1617static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi, 1618 shader_module const *vs, spirv_inst_iter entrypoint) { 1619 std::map<location_t, interface_var> inputs; 1620 bool pass = true; 1621 1622 collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false); 1623 1624 /* Build index by location */ 1625 std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs; 1626 if (vi) { 1627 for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) { 1628 auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format); 1629 for (auto j = 0u; j < num_locations; j++) { 1630 attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i]; 1631 } 1632 } 1633 } 1634 1635 auto it_a = attribs.begin(); 1636 auto it_b = inputs.begin(); 1637 1638 while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) { 1639 bool a_at_end = attribs.size() == 0 || it_a == attribs.end(); 1640 bool b_at_end = inputs.size() == 0 || it_b == inputs.end(); 1641 auto a_first = a_at_end ? 0 : it_a->first; 1642 auto b_first = b_at_end ? 0 : it_b->first.first; 1643 if (!a_at_end && (b_at_end || a_first < b_first)) { 1644 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1645 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1646 "Vertex attribute at location %d not consumed by VS", a_first)) { 1647 pass = false; 1648 } 1649 it_a++; 1650 } else if (!b_at_end && (a_at_end || b_first < a_first)) { 1651 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0, 1652 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided", 1653 b_first)) { 1654 pass = false; 1655 } 1656 it_b++; 1657 } else { 1658 unsigned attrib_type = get_format_type(it_a->second->format); 1659 unsigned input_type = get_fundamental_type(vs, it_b->second.type_id); 1660 1661 /* type checking */ 1662 if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) { 1663 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1664 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1665 "Attribute type of `%s` at location %d does not match VS input type of `%s`", 1666 string_VkFormat(it_a->second->format), a_first, 1667 describe_type(vs, it_b->second.type_id).c_str())) { 1668 pass = false; 1669 } 1670 } 1671 1672 /* OK! 
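 * (editorial note: the attribute and the VS input matched in location and fundamental type, so both cursors advance in step.)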
*/ 1673 it_a++; 1674 it_b++; 1675 } 1676 } 1677 1678 return pass; 1679} 1680 1681static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs, 1682 spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) { 1683 const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass]; 1684 std::map<location_t, interface_var> outputs; 1685 bool pass = true; 1686 1687 /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */ 1688 1689 collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false); 1690 1691 auto it = outputs.begin(); 1692 uint32_t attachment = 0; 1693 1694 /* Walk attachment list and outputs together -- this is a little overpowered since attachments 1695 * are currently dense, but the parallel with matching between shader stages is nice. 1696 */ 1697 1698 while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) { 1699 if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) { 1700 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1701 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1702 "FS writes to output location %d with no matching attachment", it->first.first)) { 1703 pass = false; 1704 } 1705 it++; 1706 } else if (it == outputs.end() || it->first.first > attachment) { 1707 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1708 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) { 1709 pass = false; 1710 } 1711 attachment++; 1712 } else { 1713 unsigned output_type = get_fundamental_type(fs, it->second.type_id); 1714 unsigned att_type = get_format_type(color_formats[attachment]); 1715 1716 /* type checking */ 1717 if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) { 1718 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1719 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1720 "Attachment %d of type `%s` does not match FS output type of `%s`", attachment, 1721 string_VkFormat(color_formats[attachment]), 1722 describe_type(fs, it->second.type_id).c_str())) { 1723 pass = false; 1724 } 1725 } 1726 1727 /* OK! */ 1728 it++; 1729 attachment++; 1730 } 1731 } 1732 1733 return pass; 1734} 1735 1736/* For some analyses, we need to know about all ids referenced by the static call tree of a particular 1737 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint, 1738 * for example. 1739 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses. 1740 * - NOT the shader input/output interfaces. 1741 * 1742 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth 1743 * converting parts of this to be generated from the machine-readable spec instead. 
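 * (Editorial sketch of the traversal below, under the same assumptions the code makes: seed the worklist with the entrypoint's function id (word 2 of OpEntryPoint), pop ids one at a time, and when a popped id is an OpFunction, scan its body up to OpFunctionEnd, pushing the pointer/image/callee operands of the interesting opcodes. The `ids` set doubles as the visited set, so no definition is walked twice.)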
1744 */ 1745static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) { 1746 std::unordered_set<uint32_t> worklist; 1747 worklist.insert(entrypoint.word(2)); 1748 1749 while (!worklist.empty()) { 1750 auto id_iter = worklist.begin(); 1751 auto id = *id_iter; 1752 worklist.erase(id_iter); 1753 1754 auto insn = src->get_def(id); 1755 if (insn == src->end()) { 1756 /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble 1757 * across all kinds of things here that we may not care about. */ 1758 continue; 1759 } 1760 1761 /* try to add to the output set */ 1762 if (!ids.insert(id).second) { 1763 continue; /* if we already saw this id, we don't want to walk it again. */ 1764 } 1765 1766 switch (insn.opcode()) { 1767 case spv::OpFunction: 1768 /* scan whole body of the function, enlisting anything interesting */ 1769 while (++insn, insn.opcode() != spv::OpFunctionEnd) { 1770 switch (insn.opcode()) { 1771 case spv::OpLoad: 1772 case spv::OpAtomicLoad: 1773 case spv::OpAtomicExchange: 1774 case spv::OpAtomicCompareExchange: 1775 case spv::OpAtomicCompareExchangeWeak: 1776 case spv::OpAtomicIIncrement: 1777 case spv::OpAtomicIDecrement: 1778 case spv::OpAtomicIAdd: 1779 case spv::OpAtomicISub: 1780 case spv::OpAtomicSMin: 1781 case spv::OpAtomicUMin: 1782 case spv::OpAtomicSMax: 1783 case spv::OpAtomicUMax: 1784 case spv::OpAtomicAnd: 1785 case spv::OpAtomicOr: 1786 case spv::OpAtomicXor: 1787 worklist.insert(insn.word(3)); /* ptr */ 1788 break; 1789 case spv::OpStore: 1790 case spv::OpAtomicStore: 1791 worklist.insert(insn.word(1)); /* ptr */ 1792 break; 1793 case spv::OpAccessChain: 1794 case spv::OpInBoundsAccessChain: 1795 worklist.insert(insn.word(3)); /* base ptr */ 1796 break; 1797 case spv::OpSampledImage: 1798 case spv::OpImageSampleImplicitLod: 1799 case spv::OpImageSampleExplicitLod: 1800 case spv::OpImageSampleDrefImplicitLod: 1801 case spv::OpImageSampleDrefExplicitLod: 1802 case spv::OpImageSampleProjImplicitLod: 1803 case spv::OpImageSampleProjExplicitLod: 1804 case spv::OpImageSampleProjDrefImplicitLod: 1805 case spv::OpImageSampleProjDrefExplicitLod: 1806 case spv::OpImageFetch: 1807 case spv::OpImageGather: 1808 case spv::OpImageDrefGather: 1809 case spv::OpImageRead: 1810 case spv::OpImage: 1811 case spv::OpImageQueryFormat: 1812 case spv::OpImageQueryOrder: 1813 case spv::OpImageQuerySizeLod: 1814 case spv::OpImageQuerySize: 1815 case spv::OpImageQueryLod: 1816 case spv::OpImageQueryLevels: 1817 case spv::OpImageQuerySamples: 1818 case spv::OpImageSparseSampleImplicitLod: 1819 case spv::OpImageSparseSampleExplicitLod: 1820 case spv::OpImageSparseSampleDrefImplicitLod: 1821 case spv::OpImageSparseSampleDrefExplicitLod: 1822 case spv::OpImageSparseSampleProjImplicitLod: 1823 case spv::OpImageSparseSampleProjExplicitLod: 1824 case spv::OpImageSparseSampleProjDrefImplicitLod: 1825 case spv::OpImageSparseSampleProjDrefExplicitLod: 1826 case spv::OpImageSparseFetch: 1827 case spv::OpImageSparseGather: 1828 case spv::OpImageSparseDrefGather: 1829 case spv::OpImageTexelPointer: 1830 worklist.insert(insn.word(3)); /* image or sampled image */ 1831 break; 1832 case spv::OpImageWrite: 1833 worklist.insert(insn.word(1)); /* image -- different operand order to above */ 1834 break; 1835 case spv::OpFunctionCall: 1836 for (uint32_t i = 3; i < insn.len(); i++) { 1837 worklist.insert(insn.word(i)); /* fn itself, and all args */ 1838 } 1839 break; 1840 1841 case spv::OpExtInst: 1842 for (uint32_t i = 5; i < 
insn.len(); i++) { 1843 worklist.insert(insn.word(i)); /* operands to ext inst */ 1844 } 1845 break; 1846 } 1847 } 1848 break; 1849 } 1850 } 1851} 1852 1853static bool validate_push_constant_block_against_pipeline(layer_data *my_data, 1854 std::vector<VkPushConstantRange> const *pushConstantRanges, 1855 shader_module const *src, spirv_inst_iter type, 1856 VkShaderStageFlagBits stage) { 1857 bool pass = true; 1858 1859 /* strip off ptrs etc */ 1860 type = get_struct_type(src, type, false); 1861 assert(type != src->end()); 1862 1863 /* validate directly off the offsets. this isn't quite correct for arrays 1864 * and matrices, but is a good first step. TODO: arrays, matrices, weird 1865 * sizes */ 1866 for (auto insn : *src) { 1867 if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) { 1868 1869 if (insn.word(3) == spv::DecorationOffset) { 1870 unsigned offset = insn.word(4); 1871 auto size = 4; /* bytes; TODO: calculate this based on the type */ 1872 1873 bool found_range = false; 1874 for (auto const &range : *pushConstantRanges) { 1875 if (range.offset <= offset && range.offset + range.size >= offset + size) { 1876 found_range = true; 1877 1878 if ((range.stageFlags & stage) == 0) { 1879 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1880 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC", 1881 "Push constant range covering variable starting at " 1882 "offset %u not accessible from stage %s", 1883 offset, string_VkShaderStageFlagBits(stage))) { 1884 pass = false; 1885 } 1886 } 1887 1888 break; 1889 } 1890 } 1891 1892 if (!found_range) { 1893 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1894 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC", 1895 "Push constant range covering variable starting at " 1896 "offset %u not declared in layout", 1897 offset)) { 1898 pass = false; 1899 } 1900 } 1901 } 1902 } 1903 } 1904 1905 return pass; 1906} 1907 1908static bool validate_push_constant_usage(layer_data *my_data, 1909 std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src, 1910 std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) { 1911 bool pass = true; 1912 1913 for (auto id : accessible_ids) { 1914 auto def_insn = src->get_def(id); 1915 if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) { 1916 pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src, 1917 src->get_def(def_insn.word(1)), stage); 1918 } 1919 } 1920 1921 return pass; 1922} 1923 1924// For given pipelineLayout verify that the setLayout at slot.first 1925// has the requested binding at slot.second 1926static VkDescriptorSetLayoutBinding const * get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) { 1927 1928 if (!pipelineLayout) 1929 return nullptr; 1930 1931 if (slot.first >= pipelineLayout->descriptorSetLayouts.size()) 1932 return nullptr; 1933 1934 auto const layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]]; 1935 1936 auto bindingIt = layout_node->bindingToIndexMap.find(slot.second); 1937 if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL)) 1938 return nullptr; 1939 1940 assert(bindingIt->second < layout_node->createInfo.bindingCount); 1941 return &layout_node->createInfo.pBindings[bindingIt->second]; 
1942} 1943 1944// Block of code at start here for managing/tracking Pipeline state that this layer cares about 1945 1946static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0}; 1947 1948// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound 1949// Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates 1950// to that same cmd buffer by separate thread are not changing state from underneath us 1951// Track the last cmd buffer touched by this thread 1952 1953static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) { 1954 for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) { 1955 if (pCB->drawCount[i]) 1956 return true; 1957 } 1958 return false; 1959} 1960 1961// Check object status for selected flag state 1962static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags, 1963 DRAW_STATE_ERROR error_code, const char *fail_msg) { 1964 if (!(pNode->status & status_mask)) { 1965 return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 1966 reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS", 1967 "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg); 1968 } 1969 return false; 1970} 1971 1972// Retrieve pipeline node ptr for given pipeline object 1973static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) { 1974 if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) { 1975 return NULL; 1976 } 1977 return my_data->pipelineMap[pipeline]; 1978} 1979 1980// Return true if for a given PSO, the given state enum is dynamic, else return false 1981static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) { 1982 if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) { 1983 for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { 1984 if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) 1985 return true; 1986 } 1987 } 1988 return false; 1989} 1990 1991// Validate state stored as flags at time of draw call 1992static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) { 1993 bool result; 1994 result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND, 1995 "Dynamic viewport state not set for this command buffer"); 1996 result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND, 1997 "Dynamic scissor state not set for this command buffer"); 1998 if (pPipe->graphicsPipelineCI.pInputAssemblyState && 1999 ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) || 2000 (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) { 2001 result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2002 DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer"); 2003 } 2004 if (pPipe->graphicsPipelineCI.pRasterizationState && 2005 (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) { 2006 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2007 DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not 
set for this command buffer"); 2008 } 2009 if (pPipe->blendConstantsEnabled) { 2010 result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2011 DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer"); 2012 } 2013 if (pPipe->graphicsPipelineCI.pDepthStencilState && 2014 (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) { 2015 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2016 DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer"); 2017 } 2018 if (pPipe->graphicsPipelineCI.pDepthStencilState && 2019 (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) { 2020 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2021 DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer"); 2022 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2023 DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer"); 2024 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2025 DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer"); 2026 } 2027 if (indexedDraw) { 2028 result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2029 DRAWSTATE_INDEX_BUFFER_NOT_BOUND, 2030 "Index buffer object not bound to this command buffer when Indexed Draw attempted"); 2031 } 2032 return result; 2033} 2034 2035// Verify attachment reference compatibility according to spec 2036// If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this 2037// If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions 2038// to make sure that format and sample counts match. 2039// If not, they are not compatible.
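// (Worked example, added editorially: if the primary subpass references attachment 2 at some color index and the secondary references attachment 0 at the same index, the differing indices are fine -- compatibility only requires that the two referenced VkAttachmentDescriptions agree on format and samples, e.g. both VK_FORMAT_B8G8R8A8_UNORM with VK_SAMPLE_COUNT_1_BIT.)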
2040static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary, 2041 const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments, 2042 const VkAttachmentReference *pSecondary, const uint32_t secondaryCount, 2043 const VkAttachmentDescription *pSecondaryAttachments) { 2044 if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED 2045 if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) 2046 return true; 2047 } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED 2048 if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) 2049 return true; 2050 } else { // format and sample count must match 2051 if ((pPrimaryAttachments[pPrimary[index].attachment].format == 2052 pSecondaryAttachments[pSecondary[index].attachment].format) && 2053 (pPrimaryAttachments[pPrimary[index].attachment].samples == 2054 pSecondaryAttachments[pSecondary[index].attachment].samples)) 2055 return true; 2056 } 2057 // Format and sample counts didn't match 2058 return false; 2059} 2060 2061// For given primary and secondary RenderPass objects, verify that they're compatible 2062static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP, 2063 string &errorMsg) { 2064 if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) { 2065 stringstream errorStr; 2066 errorStr << "invalid VkRenderPass (" << primaryRP << ")"; 2067 errorMsg = errorStr.str(); 2068 return false; 2069 } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) { 2070 stringstream errorStr; 2071 errorStr << "invalid VkRenderPass (" << secondaryRP << ")"; 2072 errorMsg = errorStr.str(); 2073 return false; 2074 } 2075 // Trivial pass case is exact same RP 2076 if (primaryRP == secondaryRP) { 2077 return true; 2078 } 2079 const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo; 2080 const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo; 2081 if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) { 2082 stringstream errorStr; 2083 errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount 2084 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses."; 2085 errorMsg = errorStr.str(); 2086 return false; 2087 } 2088 uint32_t spIndex = 0; 2089 for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) { 2090 // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible 2091 uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount; 2092 uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount; 2093 uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount); 2094 for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) { 2095 if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount, 2096 primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments, 2097 secondaryColorCount, secondaryRPCI->pAttachments)) { 2098 stringstream errorStr; 2099 errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible."; 2100 errorMsg = errorStr.str(); 2101 return false; 2102 } else if (!attachment_references_compatible(cIdx,
primaryRPCI->pSubpasses[spIndex].pResolveAttachments, 2103 primaryColorCount, primaryRPCI->pAttachments, 2104 secondaryRPCI->pSubpasses[spIndex].pResolveAttachments, 2105 secondaryColorCount, secondaryRPCI->pAttachments)) { 2106 stringstream errorStr; 2107 errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible."; 2108 errorMsg = errorStr.str(); 2109 return false; 2110 } 2111 } 2112 2113 if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 2114 1, primaryRPCI->pAttachments, 2115 secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 2116 1, secondaryRPCI->pAttachments)) { 2117 stringstream errorStr; 2118 errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible."; 2119 errorMsg = errorStr.str(); 2120 return false; 2121 } 2122 2123 uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount; 2124 uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount; 2125 uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount); 2126 for (uint32_t i = 0; i < inputMax; ++i) { 2127 if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount, 2128 primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments, 2129 secondaryInputCount, secondaryRPCI->pAttachments)) { 2130 stringstream errorStr; 2131 errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible."; 2132 errorMsg = errorStr.str(); 2133 return false; 2134 } 2135 } 2136 } 2137 return true; 2138} 2139 2140// For given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex] 2141static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout, 2142 const uint32_t layoutIndex, string &errorMsg) { 2143 auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout); 2144 if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) { 2145 stringstream errorStr; 2146 errorStr << "invalid VkPipelineLayout (" << layout << ")"; 2147 errorMsg = errorStr.str(); 2148 return false; 2149 } 2150 if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) { 2151 stringstream errorStr; 2152 errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size() 2153 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1 2154 << ", but you're attempting to bind set to index " << layoutIndex; 2155 errorMsg = errorStr.str(); 2156 return false; 2157 } 2158 // Get the specific setLayout from PipelineLayout that overlaps this set 2159 LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]]; 2160 if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case 2161 return true; 2162 } 2163 size_t descriptorCount = pLayoutNode->descriptorTypes.size(); 2164 if (descriptorCount != pSet->pLayout->descriptorTypes.size()) { 2165 stringstream errorStr; 2166 errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount 2167 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size() 2168 << " descriptors."; 2169 errorMsg = errorStr.str(); 2170 return false; // trivial fail case 2171 } 2172 // Now
need to check set against corresponding pipelineLayout to verify compatibility 2173 for (size_t i = 0; i < descriptorCount; ++i) { 2174 // Need to verify that layouts are identically defined 2175 // TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor 2176 // do we also need to check immutable samplers? 2177 if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) { 2178 stringstream errorStr; 2179 errorStr << "descriptor " << i << " for descriptorSet being bound is type '" 2180 << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i]) 2181 << "' but corresponding descriptor from pipelineLayout is type '" 2182 << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'"; 2183 errorMsg = errorStr.str(); 2184 return false; 2185 } 2186 if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) { 2187 stringstream errorStr; 2188 errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i] 2189 << " but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i]; 2190 errorMsg = errorStr.str(); 2191 return false; 2192 } 2193 } 2194 return true; 2195} 2196 2197// Validate that data for each specialization entry is fully contained within the buffer. 2198static bool validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) { 2199 bool pass = true; 2200 2201 VkSpecializationInfo const *spec = info->pSpecializationInfo; 2202 2203 if (spec) { 2204 for (auto i = 0u; i < spec->mapEntryCount; i++) { 2205 if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) { 2206 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 2207 /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC", 2208 "Specialization entry %u (for constant id %u) references memory outside provided " 2209 "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER 2210 " bytes provided)", 2211 i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset, 2212 spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) { 2213 2214 pass = false; 2215 } 2216 } 2217 } 2218 } 2219 2220 return pass; 2221} 2222 2223static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id, 2224 VkDescriptorType descriptor_type, unsigned &descriptor_count) { 2225 auto type = module->get_def(type_id); 2226 2227 descriptor_count = 1; 2228 2229 /* Strip off any array or ptrs. Where we remove array levels, adjust the 2230 * descriptor count for each dimension.
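 * (Example, added for illustration: a GLSL `uniform sampler2D tex[4]` appears here as OpTypePointer -> OpTypeArray(length 4) -> OpTypeSampledImage; the loop peels the pointer, multiplies descriptor_count by 4 via the array's length constant, and leaves `type` at the OpTypeSampledImage for the switch below.)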
*/ 2231 while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) { 2232 if (type.opcode() == spv::OpTypeArray) { 2233 descriptor_count *= get_constant_value(module, type.word(3)); 2234 type = module->get_def(type.word(2)); 2235 } 2236 else { 2237 type = module->get_def(type.word(3)); 2238 } 2239 } 2240 2241 switch (type.opcode()) { 2242 case spv::OpTypeStruct: { 2243 for (auto insn : *module) { 2244 if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) { 2245 if (insn.word(2) == spv::DecorationBlock) { 2246 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || 2247 descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; 2248 } else if (insn.word(2) == spv::DecorationBufferBlock) { 2249 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || 2250 descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; 2251 } 2252 } 2253 } 2254 2255 /* Invalid */ 2256 return false; 2257 } 2258 2259 case spv::OpTypeSampler: 2260 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER; 2261 2262 case spv::OpTypeSampledImage: 2263 if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { 2264 /* Slight relaxation for some GLSL historical madness: samplerBuffer 2265 * doesn't really have a sampler, and a texel buffer descriptor 2266 * doesn't really provide one. Allow this slight mismatch. 2267 */ 2268 auto image_type = module->get_def(type.word(2)); 2269 auto dim = image_type.word(3); 2270 auto sampled = image_type.word(7); 2271 return dim == spv::DimBuffer && sampled == 1; 2272 } 2273 return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; 2274 2275 case spv::OpTypeImage: { 2276 /* Many descriptor types backing image types-- depends on dimension 2277 * and whether the image will be used with a sampler. SPIRV for 2278 * Vulkan requires that sampled be 1 or 2 -- leaving the decision to 2279 * runtime is unacceptable. 2280 */ 2281 auto dim = type.word(3); 2282 auto sampled = type.word(7); 2283 2284 if (dim == spv::DimSubpassData) { 2285 return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; 2286 } else if (dim == spv::DimBuffer) { 2287 if (sampled == 1) { 2288 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; 2289 } else { 2290 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; 2291 } 2292 } else if (sampled == 1) { 2293 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; 2294 } else { 2295 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; 2296 } 2297 } 2298 2299 /* We shouldn't really see any other junk types -- but if we do, they're 2300 * a mismatch. 
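 * (For instance, a bare OpTypeFloat reaching this point would fall through to the default case below and be reported as a mismatch rather than asserting. -- editorial note)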
2301 */ 2302 default: 2303 return false; /* Mismatch */ 2304 } 2305} 2306 2307static bool require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) { 2308 if (!feature) { 2309 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2310 __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC", 2311 "Shader requires VkPhysicalDeviceFeatures::%s but is not " 2312 "enabled on the device", 2313 feature_name)) { 2314 return false; 2315 } 2316 } 2317 2318 return true; 2319} 2320 2321static bool validate_shader_capabilities(layer_data *my_data, shader_module const *src) { 2322 bool pass = true; 2323 2324 auto enabledFeatures = &my_data->phys_dev_properties.features; 2325 2326 for (auto insn : *src) { 2327 if (insn.opcode() == spv::OpCapability) { 2328 switch (insn.word(1)) { 2329 case spv::CapabilityMatrix: 2330 case spv::CapabilityShader: 2331 case spv::CapabilityInputAttachment: 2332 case spv::CapabilitySampled1D: 2333 case spv::CapabilityImage1D: 2334 case spv::CapabilitySampledBuffer: 2335 case spv::CapabilityImageBuffer: 2336 case spv::CapabilityImageQuery: 2337 case spv::CapabilityDerivativeControl: 2338 // Always supported by a Vulkan 1.0 implementation -- no feature bits. 2339 break; 2340 2341 case spv::CapabilityGeometry: 2342 pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader"); 2343 break; 2344 2345 case spv::CapabilityTessellation: 2346 pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader"); 2347 break; 2348 2349 case spv::CapabilityFloat64: 2350 pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64"); 2351 break; 2352 2353 case spv::CapabilityInt64: 2354 pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64"); 2355 break; 2356 2357 case spv::CapabilityTessellationPointSize: 2358 case spv::CapabilityGeometryPointSize: 2359 pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize, 2360 "shaderTessellationAndGeometryPointSize"); 2361 break; 2362 2363 case spv::CapabilityImageGatherExtended: 2364 pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended"); 2365 break; 2366 2367 case spv::CapabilityStorageImageMultisample: 2368 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample"); 2369 break; 2370 2371 case spv::CapabilityUniformBufferArrayDynamicIndexing: 2372 pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing, 2373 "shaderUniformBufferArrayDynamicIndexing"); 2374 break; 2375 2376 case spv::CapabilitySampledImageArrayDynamicIndexing: 2377 pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing, 2378 "shaderSampledImageArrayDynamicIndexing"); 2379 break; 2380 2381 case spv::CapabilityStorageBufferArrayDynamicIndexing: 2382 pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing, 2383 "shaderStorageBufferArrayDynamicIndexing"); 2384 break; 2385 2386 case spv::CapabilityStorageImageArrayDynamicIndexing: 2387 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing, 2388 "shaderStorageImageArrayDynamicIndexing"); 2389 break; 2390 2391 case spv::CapabilityClipDistance: 2392 pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance"); 2393 break; 2394 2395 case spv::CapabilityCullDistance: 2396 pass &= 
require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance"); 2397 break; 2398 2399 case spv::CapabilityImageCubeArray: 2400 pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray"); 2401 break; 2402 2403 case spv::CapabilitySampleRateShading: 2404 pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading"); 2405 break; 2406 2407 case spv::CapabilitySparseResidency: 2408 pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency"); 2409 break; 2410 2411 case spv::CapabilityMinLod: 2412 pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod"); 2413 break; 2414 2415 case spv::CapabilitySampledCubeArray: 2416 pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray"); 2417 break; 2418 2419 case spv::CapabilityImageMSArray: 2420 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample"); 2421 break; 2422 2423 case spv::CapabilityStorageImageExtendedFormats: 2424 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats, 2425 "shaderStorageImageExtendedFormats"); 2426 break; 2427 2428 case spv::CapabilityInterpolationFunction: 2429 pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading"); 2430 break; 2431 2432 case spv::CapabilityStorageImageReadWithoutFormat: 2433 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat, 2434 "shaderStorageImageReadWithoutFormat"); 2435 break; 2436 2437 case spv::CapabilityStorageImageWriteWithoutFormat: 2438 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat, 2439 "shaderStorageImageWriteWithoutFormat"); 2440 break; 2441 2442 case spv::CapabilityMultiViewport: 2443 pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport"); 2444 break; 2445 2446 default: 2447 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2448 __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC", 2449 "Shader declares capability %u, not supported in Vulkan.", 2450 insn.word(1))) 2451 pass = false; 2452 break; 2453 } 2454 } 2455 } 2456 2457 return pass; 2458} 2459 2460static bool validate_pipeline_shader_stage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage, 2461 PIPELINE_NODE *pipeline, PIPELINE_LAYOUT_NODE *pipelineLayout, 2462 shader_module **out_module, spirv_inst_iter *out_entrypoint) { 2463 bool pass = true; 2464 auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get(); 2465 pass &= validate_specialization_offsets(dev_data, pStage); 2466 2467 /* find the entrypoint */ 2468 auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage); 2469 if (entrypoint == module->end()) { 2470 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2471 __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC", 2472 "No entrypoint found named `%s` for stage %s", pStage->pName, 2473 string_VkShaderStageFlagBits(pStage->stage))) { 2474 pass = false; 2475 } 2476 } 2477 2478 /* validate shader capabilities against enabled device features */ 2479 pass &= validate_shader_capabilities(dev_data, module); 2480 2481 /* mark accessible ids */ 2482 std::unordered_set<uint32_t> accessible_ids; 2483 mark_accessible_ids(module, entrypoint, accessible_ids); 2484 2485 /* validate descriptor set 
layout against what the entrypoint actually uses */ 2486 std::map<descriptor_slot_t, interface_var> descriptor_uses; 2487 collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses); 2488 2489 /* validate push constant usage */ 2490 pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges, 2491 module, accessible_ids, pStage->stage); 2492 2493 /* validate descriptor use */ 2494 for (auto use : descriptor_uses) { 2495 // While validating shaders capture which slots are used by the pipeline 2496 pipeline->active_slots[use.first.first].insert(use.first.second); 2497 2498 /* find the matching binding */ 2499 auto binding = get_descriptor_binding(dev_data, pipelineLayout, use.first); 2500 unsigned required_descriptor_count; 2501 2502 if (!binding) { 2503 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2504 __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC", 2505 "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout", 2506 use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) { 2507 pass = false; 2508 } 2509 } else if (~binding->stageFlags & pStage->stage) { 2510 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 2511 /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC", 2512 "Shader uses descriptor slot %u.%u (used " 2513 "as type `%s`) but descriptor not " 2514 "accessible from stage %s", 2515 use.first.first, use.first.second, 2516 describe_type(module, use.second.type_id).c_str(), 2517 string_VkShaderStageFlagBits(pStage->stage))) { 2518 pass = false; 2519 } 2520 } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) { 2521 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2522 __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", 2523 "Type mismatch on descriptor slot " 2524 "%u.%u (used as type `%s`) but " 2525 "descriptor of type %s", 2526 use.first.first, use.first.second, 2527 describe_type(module, use.second.type_id).c_str(), 2528 string_VkDescriptorType(binding->descriptorType))) { 2529 pass = false; 2530 } 2531 } else if (binding->descriptorCount < required_descriptor_count) { 2532 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2533 __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", 2534 "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided", 2535 required_descriptor_count, use.first.first, use.first.second, 2536 describe_type(module, use.second.type_id).c_str(), 2537 binding->descriptorCount)) { 2538 pass = false; 2539 } 2540 } 2541 } 2542 2543 return pass; 2544} 2545 2546 2547// Validate the shaders used by the given pipeline and store the active_slots 2548// that are actually used by the pipeline into pPipeline->active_slots 2549static bool validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) { 2550 auto pCreateInfo = reinterpret_cast<VkGraphicsPipelineCreateInfo const *>(&pPipeline->graphicsPipelineCI); 2551 int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT); 2552 int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT); 2553 2554 shader_module *shaders[5]; 2555 memset(shaders, 0, sizeof(shaders)); 2556 spirv_inst_iter
entrypoints[5]; 2557 memset(entrypoints, 0, sizeof(entrypoints)); 2558 VkPipelineVertexInputStateCreateInfo const *vi = 0; 2559 bool pass = true; 2560 2561 auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr; 2562 2563 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { 2564 VkPipelineShaderStageCreateInfo const *pStage = 2565 reinterpret_cast<VkPipelineShaderStageCreateInfo const *>(&pCreateInfo->pStages[i]); 2566 auto stage_id = get_shader_stage_id(pStage->stage); 2567 pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout, 2568 &shaders[stage_id], &entrypoints[stage_id]); 2569 } 2570 2571 vi = pCreateInfo->pVertexInputState; 2572 2573 if (vi) { 2574 pass &= validate_vi_consistency(my_data, vi); 2575 } 2576 2577 if (shaders[vertex_stage]) { 2578 pass &= validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]); 2579 } 2580 2581 int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT); 2582 int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); 2583 2584 while (!shaders[producer] && producer != fragment_stage) { 2585 producer++; 2586 consumer++; 2587 } 2588 2589 for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) { 2590 assert(shaders[producer]); 2591 if (shaders[consumer]) { 2592 pass &= validate_interface_between_stages(my_data, 2593 shaders[producer], entrypoints[producer], &shader_stage_attribs[producer], 2594 shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]); 2595 2596 producer = consumer; 2597 } 2598 } 2599 2600 auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr; 2601 2602 if (shaders[fragment_stage] && rp) { 2603 pass &= validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp, 2604 pCreateInfo->subpass); 2605 } 2606 2607 return pass; 2608} 2609 2610static bool validate_compute_pipeline(layer_data *my_data, PIPELINE_NODE *pPipeline) { 2611 auto pCreateInfo = reinterpret_cast<VkComputePipelineCreateInfo const *>(&pPipeline->computePipelineCI); 2612 2613 auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? 
&my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr; 2614 2615 shader_module *module; 2616 spirv_inst_iter entrypoint; 2617 2618 return validate_pipeline_shader_stage(my_data, &pCreateInfo->stage, pPipeline, pipelineLayout, 2619 &module, &entrypoint); 2620} 2621 2622// Return Set node ptr for specified set or else NULL 2623static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) { 2624 if (my_data->setMap.find(set) == my_data->setMap.end()) { 2625 return NULL; 2626 } 2627 return my_data->setMap[set]; 2628} 2629 2630// For given Layout Node and binding, return index where that binding begins 2631static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) { 2632 uint32_t offsetIndex = 0; 2633 for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) { 2634 if (pLayout->createInfo.pBindings[i].binding == binding) 2635 break; 2636 offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount; 2637 } 2638 return offsetIndex; 2639} 2640 2641// For given layout node and binding, return last index that is updated 2642static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) { 2643 uint32_t offsetIndex = 0; 2644 for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) { 2645 offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount; 2646 if (pLayout->createInfo.pBindings[i].binding == binding) 2647 break; 2648 } 2649 return offsetIndex - 1; 2650} 2651 2652// For the given command buffer, verify and update the state for activeSetBindingsPairs 2653// This includes: 2654// 1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound. 2655// To be valid, the dynamic offset combined with the offset and range from its 2656// descriptor update must not overflow the size of its buffer being updated 2657// 2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images 2658// 3. 
Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers 2659static bool validate_and_update_drawtime_descriptor_state( 2660 layer_data *dev_data, GLOBAL_CB_NODE *pCB, 2661 const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) { 2662 bool result = false; 2663 2664 VkWriteDescriptorSet *pWDS = NULL; 2665 uint32_t dynOffsetIndex = 0; 2666 VkDeviceSize bufferSize = 0; 2667 for (auto set_bindings_pair : activeSetBindingsPairs) { 2668 SET_NODE *set_node = set_bindings_pair.first; 2669 LAYOUT_NODE *layout_node = set_node->pLayout; 2670 for (auto binding : set_bindings_pair.second) { 2671 uint32_t startIdx = getBindingStartIndex(layout_node, binding); 2672 uint32_t endIdx = getBindingEndIndex(layout_node, binding); 2673 for (uint32_t i = startIdx; i <= endIdx; ++i) { 2674 // We did check earlier to verify that set was updated, but now make sure given slot was updated 2675 // TODO : Would be better to store set# that set is bound to so we can report set.binding[index] not updated 2676 // For immutable sampler w/o combined image, don't need to update 2677 if ((set_node->pLayout->createInfo.pBindings[i].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) && 2678 (set_node->pLayout->createInfo.pBindings[i].descriptorCount != 0) && 2679 (set_node->pLayout->createInfo.pBindings[i].pImmutableSamplers)) { 2680 // Nothing to do here 2681 } else if (!set_node->pDescriptorUpdates[i]) { 2682 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2683 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2684 DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS", 2685 "DS %#" PRIxLEAST64 " bound and active but it never had binding %u updated. It is now being used to draw so " 2686 "this will result in undefined behavior.", 2687 reinterpret_cast<const uint64_t &>(set_node->set), binding); 2688 } else { 2689 switch (set_node->pDescriptorUpdates[i]->sType) { 2690 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 2691 pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i]; 2692 2693 // Verify uniform and storage buffers actually are bound to valid memory at draw time. 2694 if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || 2695 (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || 2696 (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) || 2697 (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { 2698 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2699 auto buffer_node = dev_data->bufferMap.find(pWDS->pBufferInfo[j].buffer); 2700 if (buffer_node == dev_data->bufferMap.end()) { 2701 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2702 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2703 reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2704 DRAWSTATE_INVALID_BUFFER, "DS", 2705 "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index #%u" 2706 " is not defined! 
Has vkCreateBuffer been called?", 2707 reinterpret_cast<const uint64_t &>(set_node->set), 2708 string_VkDescriptorType(pWDS->descriptorType), 2709 reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i); 2710 } else { 2711 auto mem_entry = dev_data->memObjMap.find(buffer_node->second.mem); 2712 if (mem_entry == dev_data->memObjMap.end()) { 2713 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2714 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2715 reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2716 DRAWSTATE_INVALID_BUFFER, "DS", 2717 "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index" 2718 " #%u, has no memory bound to it!", 2719 reinterpret_cast<const uint64_t &>(set_node->set), 2720 string_VkDescriptorType(pWDS->descriptorType), 2721 reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i); 2722 } 2723 } 2724 // If it's a dynamic buffer, make sure the offsets are within the buffer. 2725 if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || 2726 (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { 2727 bufferSize = dev_data->bufferMap[pWDS->pBufferInfo[j].buffer].createInfo.size; 2728 uint32_t dynOffset = 2729 pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex]; 2730 if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) { 2731 if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) { 2732 result |= log_msg( 2733 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2734 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2735 reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2736 DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS", 2737 "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of " 2738 "VK_WHOLE_SIZE but dynamic offset %#" PRIxLEAST32 ". " 2739 "Combined with offset %#" PRIxLEAST64 ", this oversteps its buffer (%#" PRIxLEAST64 2740 ") which has a size of %#" PRIxLEAST64 ".", 2741 reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset, 2742 pWDS->pBufferInfo[j].offset, 2743 reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize); 2744 } 2745 } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > 2746 bufferSize) { 2747 result |= 2748 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2749 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2750 reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2751 DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS", 2752 "VkDescriptorSet (%#" PRIxLEAST64 2753 ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". 
" 2754 "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64 2755 " from its update, this oversteps its buffer " 2756 "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".", 2757 reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset, 2758 pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range, 2759 reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize); 2760 } 2761 dynOffsetIndex++; 2762 } 2763 } 2764 } 2765 if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { 2766 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2767 pCB->updateImages.insert(pWDS->pImageInfo[j].imageView); 2768 } 2769 } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) { 2770 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2771 assert(dev_data->bufferViewMap.find(pWDS->pTexelBufferView[j]) != dev_data->bufferViewMap.end()); 2772 pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer); 2773 } 2774 } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || 2775 pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { 2776 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2777 pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer); 2778 } 2779 } 2780 i += pWDS->descriptorCount; // Advance i to end of this set of descriptors (++i at end of for loop will move 1 2781 // index past last of these descriptors) 2782 break; 2783 default: // Currently only shadowing Write update nodes so shouldn't get here 2784 assert(0); 2785 continue; 2786 } 2787 } 2788 } 2789 } 2790 } 2791 return result; 2792} 2793// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound. 2794// When validate_and_update_draw_state() handles computer shaders so that active_slots is correct for compute pipelines, this 2795// function can be killed and validate_and_update_draw_state() used instead 2796static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 2797 VkWriteDescriptorSet *pWDS = nullptr; 2798 SET_NODE *pSet = nullptr; 2799 // For the bound descriptor sets, pull off any storage images and buffers 2800 // This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute 2801 // pipelines 2802 for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) { 2803 // Get the set node 2804 pSet = getSetNode(dev_data, set); 2805 // For each update in the set 2806 for (auto pUpdate : pSet->pDescriptorUpdates) { 2807 // If it's a write update to STORAGE type capture image/buffer being updated 2808 if (pUpdate && (pUpdate->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)) { 2809 pWDS = reinterpret_cast<VkWriteDescriptorSet *>(pUpdate); 2810 if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { 2811 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2812 pCB->updateImages.insert(pWDS->pImageInfo[j].imageView); 2813 } 2814 } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) { 2815 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2816 pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer); 2817 } 2818 } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || 2819 pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { 2820 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2821 pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer); 
2822 } 2823 } 2824 } 2825 } 2826 } 2827} 2828 2829// Validate overall state at the time of a draw call 2830static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw, 2831 const VkPipelineBindPoint bindPoint) { 2832 bool result = false; 2833 auto const &state = pCB->lastBound[bindPoint]; 2834 PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline); 2835 // First check flag states 2836 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) 2837 result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw); 2838 2839 // Now complete other state checks 2840 // TODO : Currently only performing next check if *something* was bound (non-zero last bound) 2841 // There is probably a better way to gate when this check happens, and to know if something *should* have been bound 2842 // We should have that check separately and then gate this check based on that check 2843 if (pPipe) { 2844 if (state.pipelineLayout) { 2845 string errorString; 2846 // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets 2847 vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs; 2848 for (auto setBindingPair : pPipe->active_slots) { 2849 uint32_t setIndex = setBindingPair.first; 2850 // If valid set is not bound throw an error 2851 if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) { 2852 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2853 __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS", 2854 "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.", 2855 (uint64_t)pPipe->pipeline, setIndex); 2856 } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]], 2857 pPipe->graphicsPipelineCI.layout, setIndex, errorString)) { 2858 // Set is bound but not compatible w/ overlapping pipelineLayout from PSO 2859 VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set; 2860 result |= log_msg( 2861 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2862 (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS", 2863 "VkDescriptorSet (%#" PRIxLEAST64 2864 ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s", 2865 (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str()); 2866 } else { // Valid set is bound and layout compatible, validate that it's updated 2867 // Pull the set node 2868 SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]]; 2869 // Save vector of all active sets to verify dynamicOffsets below 2870 activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second)); 2871 // Make sure set has been updated if it has no immutable samplers 2872 // If it has immutable samplers, we'll flag error later as needed depending on binding 2873 if (!pSet->pUpdateStructs && !pSet->pLayout->immutableSamplerCount) { 2874 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2875 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__, 2876 DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS", 2877 "DS %#" PRIxLEAST64 " bound but it was never updated. 
It is now being used to draw so " 2878 "this will result in undefined behavior.", 2879 (uint64_t)pSet->set); 2880 } 2881 } 2882 } 2883 // For given active slots, verify any dynamic descriptors and record updated images & buffers 2884 result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs); 2885 } 2886 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) { 2887 // Verify Vtx binding 2888 if (pPipe->vertexBindingDescriptions.size() > 0) { 2889 for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) { 2890 if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) { 2891 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2892 __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS", 2893 "The Pipeline State Object (%#" PRIxLEAST64 2894 ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER 2895 " should be set via vkCmdBindVertexBuffers.", 2896 (uint64_t)state.pipeline, i); 2897 } 2898 } 2899 } else { 2900 if (!pCB->currentDrawData.buffers.empty()) { 2901 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 2902 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS", 2903 "Vertex buffers are bound to command buffer (%#" PRIxLEAST64 2904 ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").", 2905 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline); 2906 } 2907 } 2908 // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count. 2909 // Skip check if rasterization is disabled or there is no viewport. 2910 if ((!pPipe->graphicsPipelineCI.pRasterizationState || 2911 (pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) && 2912 pPipe->graphicsPipelineCI.pViewportState) { 2913 bool dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT); 2914 bool dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR); 2915 if (dynViewport) { 2916 if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) { 2917 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2918 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 2919 "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER 2920 ", but PSO viewportCount is %u. These counts must match.", 2921 pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount); 2922 } 2923 } 2924 if (dynScissor) { 2925 if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) { 2926 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2927 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 2928 "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER 2929 ", but PSO scissorCount is %u. 
These counts must match.", 2930 pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount); 2931 } 2932 } 2933 } 2934 } 2935 } 2936 return result; 2937} 2938 2939// Verify that create state for a pipeline is valid 2940static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines, 2941 int pipelineIndex) { 2942 bool skipCall = false; 2943 2944 PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex]; 2945 2946 // If create derivative bit is set, check that we've specified a base 2947 // pipeline correctly, and that the base pipeline was created to allow 2948 // derivatives. 2949 if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { 2950 PIPELINE_NODE *pBasePipeline = nullptr; 2951 if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^ 2952 (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) { 2953 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 2954 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 2955 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified"); 2956 } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) { 2957 if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) { 2958 skipCall |= 2959 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 2960 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 2961 "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline."); 2962 } else { 2963 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex]; 2964 } 2965 } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { 2966 pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle); 2967 } 2968 2969 if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { 2970 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 2971 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 2972 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives."); 2973 } 2974 } 2975 2976 if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) { 2977 if (!my_data->phys_dev_properties.features.independentBlend) { 2978 if (pPipeline->attachments.size() > 1) { 2979 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0]; 2980 for (size_t i = 1; i < pPipeline->attachments.size(); i++) { 2981 if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) || 2982 (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) || 2983 (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) || 2984 (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) || 2985 (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) || 2986 (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) || 2987 (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) || 2988 (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) { 2989 skipCall |= 2990 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 2991 DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not " 2992 "enabled, all elements of 
pAttachments must be identical"); 2993 } 2994 } 2995 } 2996 } 2997 if (!my_data->phys_dev_properties.features.logicOp && 2998 (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) { 2999 skipCall |= 3000 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3001 DRAWSTATE_DISABLED_LOGIC_OP, "DS", 3002 "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE"); 3003 } 3004 if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) && 3005 ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) || 3006 (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) { 3007 skipCall |= 3008 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3009 DRAWSTATE_INVALID_LOGIC_OP, "DS", 3010 "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value"); 3011 } 3012 } 3013 3014 // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state 3015 // produces nonsense errors that confuse users. Other layers should already 3016 // emit errors for renderpass being invalid. 3017 auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass); 3018 if (rp_data != my_data->renderPassMap.end() && 3019 pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) { 3020 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3021 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u " 3022 "is out of range for this renderpass (0..%u)", 3023 pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1); 3024 } 3025 3026 if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) { 3027 skipCall = true; 3028 } 3029 // Each shader's stage must be unique 3030 if (pPipeline->duplicate_shaders) { 3031 for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) { 3032 if (pPipeline->duplicate_shaders & stage) { 3033 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 3034 __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3035 "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s", 3036 string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage))); 3037 } 3038 } 3039 } 3040 // VS is required 3041 if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) { 3042 skipCall |= 3043 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3044 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required"); 3045 } 3046 // Either both or neither TC/TE shaders should be defined 3047 if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) != 3048 ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) { 3049 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3050 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3051 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair"); 3052 } 3053 // Compute shaders should be specified independent of Gfx shaders 3054 if ((pPipeline->active_shaders & 
VK_SHADER_STAGE_COMPUTE_BIT) && 3055 (pPipeline->active_shaders & 3056 (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | 3057 VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) { 3058 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3059 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3060 "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline"); 3061 } 3062 // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines. 3063 // Mismatching primitive topology and tessellation fails graphics pipeline creation. 3064 if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) && 3065 (!pPipeline->graphicsPipelineCI.pInputAssemblyState || 3066 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { 3067 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3068 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3069 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA " 3070 "topology for tessellation pipelines"); 3071 } 3072 if (pPipeline->graphicsPipelineCI.pInputAssemblyState && 3073 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { 3074 if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) { 3075 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3076 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3077 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3078 "topology is only valid for tessellation pipelines"); 3079 } 3080 if (!pPipeline->graphicsPipelineCI.pTessellationState) { 3081 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3082 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3083 "Invalid Pipeline CreateInfo State: " 3084 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3085 "topology used. pTessellationState must not be NULL in this case."); 3086 } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints || 3087 (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) { 3088 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3089 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3090 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3091 "topology used with patchControlPoints value %u." 3092 " patchControlPoints should be >0 and <=32.", 3093 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints); 3094 } 3095 } 3096 // Viewport state must be included if rasterization is enabled. 3097 // If the viewport state is included, the viewport and scissor counts should always match. 
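// For example, a PSO using dynamic viewport and scissor state could legally supply the
// following (hypothetical application-side create info, shown only to illustrate the two
// comments above):
//     VkPipelineViewportStateCreateInfo vp = {};
//     vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
//     vp.viewportCount = 1;    // counts are required and must match...
//     vp.scissorCount = 1;
//     vp.pViewports = nullptr; // ...while the pointers may be NULL when the state is dynamic
//     vp.pScissors = nullptr;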
3098 // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler 3099 if (!pPipeline->graphicsPipelineCI.pRasterizationState || 3100 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { 3101 if (!pPipeline->graphicsPipelineCI.pViewportState) { 3102 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3103 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport " 3104 "and scissors are dynamic PSO must include " 3105 "viewportCount and scissorCount in pViewportState."); 3106 } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount != 3107 pPipeline->graphicsPipelineCI.pViewportState->viewportCount) { 3108 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3109 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3110 "Gfx Pipeline viewport count (%u) must match scissor count (%u).", 3111 pPipeline->graphicsPipelineCI.pViewportState->viewportCount, 3112 pPipeline->graphicsPipelineCI.pViewportState->scissorCount); 3113 } else { 3114 // If viewport or scissor are not dynamic, then verify that data is appropriate for count 3115 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT); 3116 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR); 3117 if (!dynViewport) { 3118 if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount && 3119 !pPipeline->graphicsPipelineCI.pViewportState->pViewports) { 3120 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 3121 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3122 "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you " 3123 "must either include pViewports data, or include viewport in pDynamicState and set it with " 3124 "vkCmdSetViewport().", 3125 pPipeline->graphicsPipelineCI.pViewportState->viewportCount); 3126 } 3127 } 3128 if (!dynScissor) { 3129 if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount && 3130 !pPipeline->graphicsPipelineCI.pViewportState->pScissors) { 3131 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 3132 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3133 "Gfx Pipeline scissorCount is %u, but pScissors is NULL. 
For non-zero scissorCount, you " 3134 "must either include pScissors data, or include scissor in pDynamicState and set it with " 3135 "vkCmdSetScissor().", 3136 pPipeline->graphicsPipelineCI.pViewportState->scissorCount); 3137 } 3138 } 3139 } 3140 } 3141 return skipCall; 3142} 3143 3144// Free the Pipeline nodes 3145static void deletePipelines(layer_data *my_data) { 3146 if (my_data->pipelineMap.size() <= 0) 3147 return; 3148 for (auto &pipe_map_pair : my_data->pipelineMap) { 3149 delete pipe_map_pair.second; 3150 } 3151 my_data->pipelineMap.clear(); 3152} 3153 3154// For given pipeline, return number of MSAA samples, or one if MSAA disabled 3155static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) { 3156 PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline]; 3157 if (pPipe->graphicsPipelineCI.pMultisampleState && 3158 (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->graphicsPipelineCI.pMultisampleState->sType)) { 3159 return pPipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples; 3160 } 3161 return VK_SAMPLE_COUNT_1_BIT; 3162} 3163 3164// Validate state related to the PSO 3165static bool validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint, 3166 const VkPipeline pipeline) { 3167 bool skipCall = false; 3168 if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) { 3169 // Verify that any MSAA request in PSO matches sample# in bound FB 3170 // Skip the check if rasterization is disabled. 3171 PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline]; 3172 if (!pPipeline->graphicsPipelineCI.pRasterizationState || 3173 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { 3174 VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline); 3175 if (pCB->activeRenderPass) { 3176 const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo; 3177 const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass]; 3178 VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0; 3179 uint32_t i; 3180 3181 const VkPipelineColorBlendStateCreateInfo *pColorBlendState = pPipeline->graphicsPipelineCI.pColorBlendState; 3182 if ((pColorBlendState != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) && 3183 (pColorBlendState->attachmentCount != pSD->colorAttachmentCount)) { 3184 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 3185 reinterpret_cast<const uint64_t &>(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 3186 "Render pass subpass %u mismatch with blending state defined and blend state attachment " 3187 "count %u but subpass color attachment count %u! 
These must be the same.", 3188 pCB->activeSubpass, pColorBlendState->attachmentCount, pSD->colorAttachmentCount); 3189 } 3190 3191 for (i = 0; i < pSD->colorAttachmentCount; i++) { 3192 VkSampleCountFlagBits samples; 3193 3194 if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED) 3195 continue; 3196 3197 samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples; 3198 if (subpassNumSamples == (VkSampleCountFlagBits)0) { 3199 subpassNumSamples = samples; 3200 } else if (subpassNumSamples != samples) { 3201 subpassNumSamples = (VkSampleCountFlagBits)-1; 3202 break; 3203 } 3204 } 3205 if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 3206 const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples; 3207 if (subpassNumSamples == (VkSampleCountFlagBits)0) 3208 subpassNumSamples = samples; 3209 else if (subpassNumSamples != samples) 3210 subpassNumSamples = (VkSampleCountFlagBits)-1; 3211 } 3212 3213 if ((pSD->colorAttachmentCount > 0 || pSD->pDepthStencilAttachment) && 3214 psoNumSamples != subpassNumSamples) { 3215 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 3216 (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS", 3217 "Num samples mismatch! Binding PSO (%#" PRIxLEAST64 3218 ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!", 3219 (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples); 3220 } 3221 } else { 3222 // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass 3223 // Verify and flag error as appropriate 3224 } 3225 } 3226 // TODO : Add more checks here 3227 } else { 3228 // TODO : Validate non-gfx pipeline updates 3229 } 3230 return skipCall; 3231} 3232 3233// Block of code at start here specifically for managing/tracking DSs 3234 3235// Return Pool node ptr for specified pool or else NULL 3236static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) { 3237 if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) { 3238 return NULL; 3239 } 3240 return my_data->descriptorPoolMap[pool]; 3241} 3242 3243static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) { 3244 if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) { 3245 return NULL; 3246 } 3247 return my_data->descriptorSetLayoutMap[layout]; 3248} 3249 3250// Return false if update struct is of valid type, otherwise flag error and return code from callback 3251static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) { 3252 switch (pUpdateStruct->sType) { 3253 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3254 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3255 return false; 3256 default: 3257 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3258 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", 3259 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", 3260 string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType); 3261 } 3262} 3263 3264// Set count for given update struct in the last parameter 3265static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) { 3266 switch (pUpdateStruct->sType) { 3267 case 
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3268 return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount; 3269 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3270 // TODO : Need to understand this case better and make sure code is correct 3271 return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount; 3272 default: 3273 return 0; 3274 } 3275} 3276 3277// For given layout and update, return the first overall index of the layout that is updated 3278static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding, 3279 const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) { 3280 return getBindingStartIndex(pLayout, binding) + arrayIndex; 3281} 3282 3283// For given layout and update, return the last overall index of the layout that is updated 3284static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding, 3285 const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) { 3286 uint32_t count = getUpdateCount(my_data, device, pUpdateStruct); 3287 return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1; 3288} 3289 3290// Verify that the descriptor type in the update struct matches what's expected by the layout 3291static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, 3292 const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) { 3293 // First get actual type of update 3294 bool skipCall = false; 3295 VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM; 3296 uint32_t i = 0; 3297 switch (pUpdateStruct->sType) { 3298 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3299 actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType; 3300 break; 3301 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3302 /* no need to validate */ 3303 return false; 3304 break; 3305 default: 3306 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3307 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", 3308 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", 3309 string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType); 3310 } 3311 if (!skipCall) { 3312 // Set first stageFlags as reference and verify that all other updates match it 3313 VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex]; 3314 for (i = startIndex; i <= endIndex; i++) { 3315 if (pLayout->descriptorTypes[i] != actualType) { 3316 skipCall |= log_msg( 3317 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3318 DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS", 3319 "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!", 3320 string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i])); 3321 } 3322 if (pLayout->stageFlags[i] != refStageFlags) { 3323 skipCall |= log_msg( 3324 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3325 DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS", 3326 "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!", 3327 refStageFlags, pLayout->stageFlags[i]); 3328 } 3329 } 3330 } 3331 return skipCall; 3332} 3333 3334// Determine the update type, allocate a new struct of that type, shadow the given pUpdate 3335// struct into the pNewNode param. 
Return true if error condition encountered and callback signals early exit. 3336// NOTE : Calls to this function should be wrapped in mutex 3337static bool shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) { 3338 bool skipCall = false; 3339 VkWriteDescriptorSet *pWDS = NULL; 3340 VkCopyDescriptorSet *pCDS = NULL; 3341 switch (pUpdate->sType) { 3342 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3343 pWDS = new VkWriteDescriptorSet; 3344 *pNewNode = (GENERIC_HEADER *)pWDS; 3345 memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet)); 3346 3347 switch (pWDS->descriptorType) { 3348 case VK_DESCRIPTOR_TYPE_SAMPLER: 3349 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: 3350 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: 3351 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { 3352 VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount]; 3353 memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo)); 3354 pWDS->pImageInfo = info; 3355 } break; 3356 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: 3357 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { 3358 VkBufferView *info = new VkBufferView[pWDS->descriptorCount]; 3359 memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView)); 3360 pWDS->pTexelBufferView = info; 3361 } break; 3362 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: 3363 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: 3364 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: 3365 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { 3366 VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount]; 3367 memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo)); 3368 pWDS->pBufferInfo = info; 3369 } break; 3370 default: 3371 return true; 3372 break; 3373 } 3374 break; 3375 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3376 pCDS = new VkCopyDescriptorSet; 3377 *pNewNode = (GENERIC_HEADER *)pCDS; 3378 memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet)); 3379 break; 3380 default: 3381 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3382 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", 3383 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", 3384 string_VkStructureType(pUpdate->sType), pUpdate->sType)) 3385 return true; 3386 } 3387 // Make sure that pNext for the end of shadow copy is NULL 3388 (*pNewNode)->pNext = NULL; 3389 return skipCall; 3390} 3391 3392// Verify that given sampler is valid 3393static bool validateSampler(const layer_data *my_data, const VkSampler *pSampler, const bool immutable) { 3394 bool skipCall = false; 3395 auto sampIt = my_data->sampleMap.find(*pSampler); 3396 if (sampIt == my_data->sampleMap.end()) { 3397 if (!immutable) { 3398 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, 3399 (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS", 3400 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64, 3401 (uint64_t)*pSampler); 3402 } else { // immutable 3403 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, 3404 (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS", 3405 "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable " 3406 "sampler %#" PRIxLEAST64, 3407 (uint64_t)*pSampler); 3408 } 3409 } else { 3410 // TODO 
: Any further checks we want to do on the sampler? 3411 } 3412 return skipCall; 3413} 3414 3415//TODO: Consolidate functions 3416bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) { 3417 layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map); 3418 if (!(imgpair.subresource.aspectMask & aspectMask)) { 3419 return false; 3420 } 3421 VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask; 3422 imgpair.subresource.aspectMask = aspectMask; 3423 auto imgsubIt = pCB->imageLayoutMap.find(imgpair); 3424 if (imgsubIt == pCB->imageLayoutMap.end()) { 3425 return false; 3426 } 3427 if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) { 3428 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3429 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3430 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s", 3431 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout)); 3432 } 3433 if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) { 3434 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3435 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3436 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s", 3437 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout)); 3438 } 3439 node = imgsubIt->second; 3440 return true; 3441} 3442 3443bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) { 3444 if (!(imgpair.subresource.aspectMask & aspectMask)) { 3445 return false; 3446 } 3447 VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask; 3448 imgpair.subresource.aspectMask = aspectMask; 3449 auto imgsubIt = my_data->imageLayoutMap.find(imgpair); 3450 if (imgsubIt == my_data->imageLayoutMap.end()) { 3451 return false; 3452 } 3453 if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) { 3454 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3455 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3456 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s", 3457 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout)); 3458 } 3459 layout = imgsubIt->second.layout; 3460 return true; 3461} 3462 3463// find layout(s) on the cmd buf level 3464bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) { 3465 ImageSubresourcePair imgpair = {image, true, range}; 3466 node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM); 3467 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT); 3468 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT); 3469 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT); 
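// The color/depth/stencil probes above and the metadata probe below each query a single aspect
// bit: the aspect-mask overload of FindLayout() records a result only when that bit is present
// in the subresource's aspectMask, and it logs an error if two aspects of the same image
// disagree on layout (or initial layout).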
3470 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT); 3471 if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) { 3472 imgpair = {image, false, VkImageSubresource()}; 3473 auto imgsubIt = pCB->imageLayoutMap.find(imgpair); 3474 if (imgsubIt == pCB->imageLayoutMap.end()) 3475 return false; 3476 node = imgsubIt->second; 3477 } 3478 return true; 3479} 3480 3481// find layout(s) on the global level 3482bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) { 3483 layout = VK_IMAGE_LAYOUT_MAX_ENUM; 3484 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT); 3485 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT); 3486 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT); 3487 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT); 3488 if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) { 3489 imgpair = {imgpair.image, false, VkImageSubresource()}; 3490 auto imgsubIt = my_data->imageLayoutMap.find(imgpair); 3491 if (imgsubIt == my_data->imageLayoutMap.end()) 3492 return false; 3493 layout = imgsubIt->second.layout; 3494 } 3495 return true; 3496} 3497 3498bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) { 3499 ImageSubresourcePair imgpair = {image, true, range}; 3500 return FindLayout(my_data, imgpair, layout); 3501} 3502 3503bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) { 3504 auto sub_data = my_data->imageSubresourceMap.find(image); 3505 if (sub_data == my_data->imageSubresourceMap.end()) 3506 return false; 3507 auto imgIt = my_data->imageMap.find(image); 3508 if (imgIt == my_data->imageMap.end()) 3509 return false; 3510 bool ignoreGlobal = false; 3511 // TODO: Make this robust for >1 aspect mask. Now it will just say ignore 3512 // potential errors in this case. 3513 if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) { 3514 ignoreGlobal = true; 3515 } 3516 for (auto imgsubpair : sub_data->second) { 3517 if (ignoreGlobal && !imgsubpair.hasSubresource) 3518 continue; 3519 auto img_data = my_data->imageLayoutMap.find(imgsubpair); 3520 if (img_data != my_data->imageLayoutMap.end()) { 3521 layouts.push_back(img_data->second.layout); 3522 } 3523 } 3524 return true; 3525} 3526 3527// Set the layout on the global level 3528void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) { 3529 VkImage &image = imgpair.image; 3530 // TODO (mlentine): Maybe set format if new? Not used atm. 3531 my_data->imageLayoutMap[imgpair].layout = layout; 3532 // TODO (mlentine): Maybe make vector a set? 3533 auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair); 3534 if (subresource == my_data->imageSubresourceMap[image].end()) { 3535 my_data->imageSubresourceMap[image].push_back(imgpair); 3536 } 3537} 3538 3539// Set the layout on the cmdbuf level 3540void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) { 3541 pCB->imageLayoutMap[imgpair] = node; 3542 // TODO (mlentine): Maybe make vector a set? 
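// A sketch of the alternative the TODO above suggests, assuming ImageSubresourcePair were given
// the hash/compare support a set key needs (hypothetical; 'imageSubresourceSet' is not a real
// member of GLOBAL_CB_NODE):
//     std::set<ImageSubresourcePair> &subs = pCB->imageSubresourceSet[imgpair.image];
//     subs.insert(imgpair); // no-op when already present, replacing the linear std::find below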
3543 auto subresource = 3544 std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair); 3545 if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) { 3546 pCB->imageSubresourceMap[imgpair.image].push_back(imgpair); 3547 } 3548} 3549 3550void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) { 3551 // TODO (mlentine): Maybe make vector a set? 3552 if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) != 3553 pCB->imageSubresourceMap[imgpair.image].end()) { 3554 pCB->imageLayoutMap[imgpair].layout = layout; 3555 } else { 3556 // TODO (mlentine): Could be expensive and might need to be removed. 3557 assert(imgpair.hasSubresource); 3558 IMAGE_CMD_BUF_LAYOUT_NODE node; 3559 if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) { 3560 node.initialLayout = layout; 3561 } 3562 SetLayout(pCB, imgpair, {node.initialLayout, layout}); 3563 } 3564} 3565 3566template <class OBJECT, class LAYOUT> 3567void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) { 3568 if (imgpair.subresource.aspectMask & aspectMask) { 3569 imgpair.subresource.aspectMask = aspectMask; 3570 SetLayout(pObject, imgpair, layout); 3571 } 3572} 3573 3574template <class OBJECT, class LAYOUT> 3575void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) { 3576 ImageSubresourcePair imgpair = {image, true, range}; 3577 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT); 3578 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT); 3579 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT); 3580 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT); 3581} 3582 3583template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) { 3584 ImageSubresourcePair imgpair = {image, false, VkImageSubresource()}; 3585 SetLayout(pObject, image, imgpair, layout); 3586} 3587 3588void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) { 3589 auto image_view_data = dev_data->imageViewMap.find(imageView); 3590 assert(image_view_data != dev_data->imageViewMap.end()); 3591 const VkImage &image = image_view_data->second.image; 3592 const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange; 3593 // TODO: Do not iterate over every possibility - consolidate where possible 3594 for (uint32_t j = 0; j < subRange.levelCount; j++) { 3595 uint32_t level = subRange.baseMipLevel + j; 3596 for (uint32_t k = 0; k < subRange.layerCount; k++) { 3597 uint32_t layer = subRange.baseArrayLayer + k; 3598 VkImageSubresource sub = {subRange.aspectMask, level, layer}; 3599 SetLayout(pCB, image, sub, layout); 3600 } 3601 } 3602} 3603 3604// Verify that given imageView is valid 3605static bool validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) { 3606 bool skipCall = false; 3607 auto ivIt = my_data->imageViewMap.find(*pImageView); 3608 if (ivIt == my_data->imageViewMap.end()) { 3609 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3610 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", 3611 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64, 3612 
(uint64_t)*pImageView); 3613 } else { 3614 // Validate that imageLayout is compatible with aspectMask and image format 3615 VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask; 3616 VkImage image = ivIt->second.image; 3617 // TODO : Check here in case we have a bad image 3618 VkFormat format = VK_FORMAT_MAX_ENUM; 3619 auto imgIt = my_data->imageMap.find(image); 3620 if (imgIt != my_data->imageMap.end()) { 3621 format = (*imgIt).second.createInfo.format; 3622 } else { 3623 // Also need to check the swapchains. 3624 auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image); 3625 if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) { 3626 VkSwapchainKHR swapchain = swapchainIt->second; 3627 auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain); 3628 if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) { 3629 SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second; 3630 format = pswapchain_node->createInfo.imageFormat; 3631 } 3632 } 3633 } 3634 if (format == VK_FORMAT_MAX_ENUM) { 3635 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3636 (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", 3637 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64 3638 " in imageView %#" PRIxLEAST64, 3639 (uint64_t)image, (uint64_t)*pImageView); 3640 } else { 3641 bool ds = vk_format_is_depth_or_stencil(format); 3642 switch (imageLayout) { 3643 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: 3644 // Only Color bit must be set 3645 if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) { 3646 skipCall |= 3647 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3648 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS", 3649 "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL " 3650 "and imageView %#" PRIxLEAST64 "" 3651 " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.", 3652 (uint64_t)*pImageView); 3653 } 3654 // format must NOT be DS 3655 if (ds) { 3656 skipCall |= 3657 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3658 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", 3659 "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL " 3660 "and imageView %#" PRIxLEAST64 "" 3661 " but the image format is %s which is not a color format.", 3662 (uint64_t)*pImageView, string_VkFormat(format)); 3663 } 3664 break; 3665 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: 3666 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: 3667 // Depth or stencil bit must be set, but both must NOT be set 3668 if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) { 3669 if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) { 3670 // both must NOT be set 3671 skipCall |= 3672 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3673 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS", 3674 "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 "" 3675 " that has both STENCIL and DEPTH aspects set", 3676 (uint64_t)*pImageView); 3677 } 3678 } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) { 3679 // Neither were set 3680 skipCall |= 3681 log_msg(my_data->report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3682 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS", 3683 "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 "" 3684 " that does not have STENCIL or DEPTH aspect set.", 3685 string_VkImageLayout(imageLayout), (uint64_t)*pImageView); 3686 } 3687 // format must be DS 3688 if (!ds) { 3689 skipCall |= 3690 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3691 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", 3692 "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 "" 3693 " but the image format is %s which is not a depth/stencil format.", 3694 string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format)); 3695 } 3696 break; 3697 default: 3698 // anything to check for other layouts? 3699 break; 3700 } 3701 } 3702 } 3703 return skipCall; 3704} 3705 3706// Verify that given bufferView is valid 3707static bool validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) { 3708 bool skipCall = false; 3709 auto sampIt = my_data->bufferViewMap.find(*pBufferView); 3710 if (sampIt == my_data->bufferViewMap.end()) { 3711 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, 3712 (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS", 3713 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64, 3714 (uint64_t)*pBufferView); 3715 } else { 3716 // TODO : Any further checks we want to do on the bufferView? 3717 } 3718 return skipCall; 3719} 3720 3721// Verify that given bufferInfo is valid 3722static bool validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) { 3723 bool skipCall = false; 3724 auto sampIt = my_data->bufferMap.find(pBufferInfo->buffer); 3725 if (sampIt == my_data->bufferMap.end()) { 3726 skipCall |= 3727 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 3728 (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS", 3729 "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64, 3730 (uint64_t)pBufferInfo->buffer); 3731 } else { 3732 // TODO : Any further checks we want to do on the buffer? 
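// A further check the TODO above could cover, sketched under the assumption that the shadowed
// BUFFER_NODE createInfo is current (hypothetical, not implemented):
//     const VkDeviceSize size = sampIt->second.createInfo.size;
//     if (pBufferInfo->offset >= size ||
//         (pBufferInfo->range != VK_WHOLE_SIZE && pBufferInfo->range > size - pBufferInfo->offset)) {
//         // flag via log_msg() with DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR
//     }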
3733 } 3734 return skipCall; 3735} 3736 3737static bool validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS, 3738 const VkDescriptorSetLayoutBinding *pLayoutBinding) { 3739 bool skipCall = false; 3740 // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied 3741 const VkSampler *pSampler = NULL; 3742 bool immutable = false; 3743 uint32_t i = 0; 3744 // For given update type, verify that update contents are correct 3745 switch (pWDS->descriptorType) { 3746 case VK_DESCRIPTOR_TYPE_SAMPLER: 3747 for (i = 0; i < pWDS->descriptorCount; ++i) { 3748 skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable); 3749 } 3750 break; 3751 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: 3752 for (i = 0; i < pWDS->descriptorCount; ++i) { 3753 if (NULL == pLayoutBinding->pImmutableSamplers) { 3754 pSampler = &(pWDS->pImageInfo[i].sampler); 3755 if (immutable) { 3756 skipCall |= log_msg( 3757 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, 3758 (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS", 3759 "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64 3760 ", but previous update(s) from this " 3761 "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either " 3762 "use immutable or non-immutable samplers.", 3763 i, (uint64_t)*pSampler); 3764 } 3765 } else { 3766 if (i > 0 && !immutable) { 3767 skipCall |= log_msg( 3768 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, 3769 (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS", 3770 "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this " 3771 "VkWriteDescriptorSet struct used a non-immutable sampler. 
All updates from a single struct must either " 3772 "use immutable or non-immutable samplers.", 3773 i); 3774 } 3775 immutable = true; 3776 pSampler = &(pLayoutBinding->pImmutableSamplers[i]); 3777 } 3778 skipCall |= validateSampler(my_data, pSampler, immutable); 3779 } 3780 // Intentionally fall through here to also validate image stuff 3781 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: 3782 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: 3783 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: 3784 for (i = 0; i < pWDS->descriptorCount; ++i) { 3785 skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout); 3786 } 3787 break; 3788 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: 3789 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: 3790 for (i = 0; i < pWDS->descriptorCount; ++i) { 3791 skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i])); 3792 } 3793 break; 3794 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: 3795 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: 3796 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: 3797 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: 3798 for (i = 0; i < pWDS->descriptorCount; ++i) { 3799 skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i])); 3800 } 3801 break; 3802 default: 3803 break; 3804 } 3805 return skipCall; 3806} 3807// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer 3808// func_str is the name of the calling function 3809// Return false if no errors occur 3810// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain) 3811static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) { 3812 bool skip_call = false; 3813 auto set_node = my_data->setMap.find(set); 3814 if (set_node == my_data->setMap.end()) { 3815 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3816 (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS", 3817 "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(), 3818 (uint64_t)(set)); 3819 } else { 3820 if (set_node->second->in_use.load()) { 3821 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 3822 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE, 3823 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.", 3824 func_str.c_str(), (uint64_t)(set)); 3825 } 3826 } 3827 return skip_call; 3828} 3829static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) { 3830 // Flag any CBs this set is bound to as INVALID 3831 for (auto cb : pSet->boundCmdBuffers) { 3832 auto cb_node = dev_data->commandBufferMap.find(cb); 3833 if (cb_node != dev_data->commandBufferMap.end()) { 3834 cb_node->second->state = CB_INVALID; 3835 } 3836 } 3837} 3838// update DS mappings based on write and copy update arrays 3839static bool dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS, 3840 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) { 3841 bool skipCall = false; 3842 3843 LAYOUT_NODE *pLayout = NULL; 3844 VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL; 3845 // Validate Write updates 3846 uint32_t i = 0; 3847 for (i = 0; i < descriptorWriteCount; i++) { 3848 VkDescriptorSet ds = pWDS[i].dstSet; 3849 SET_NODE *pSet = my_data->setMap[ds]; 3850 // Set being updated cannot 
be in-flight 3851 if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == true) 3852 return skipCall; 3853 // If set is bound to any cmdBuffers, mark them invalid 3854 invalidateBoundCmdBuffers(my_data, pSet); 3855 GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i]; 3856 pLayout = pSet->pLayout; 3857 // First verify valid update struct 3858 if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == true) { 3859 break; 3860 } 3861 uint32_t binding = 0, endIndex = 0; 3862 binding = pWDS[i].dstBinding; 3863 auto bindingToIndex = pLayout->bindingToIndexMap.find(binding); 3864 // Make sure that layout being updated has the binding being updated 3865 if (bindingToIndex == pLayout->bindingToIndexMap.end()) { 3866 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3867 (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS", 3868 "Descriptor Set %" PRIu64 " does not have a binding to match " 3869 "update binding %u for update type " 3870 "%s!", 3871 (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType)); 3872 } else { 3873 // Next verify that update falls within size of given binding 3874 endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate); 3875 if (getBindingEndIndex(pLayout, binding) < endIndex) { 3876 pLayoutCI = &pLayout->createInfo; 3877 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} "); 3878 skipCall |= 3879 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3880 (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS", 3881 "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!", 3882 string_VkStructureType(pUpdate->sType), binding, DSstr.c_str()); 3883 } else { // TODO : should we skip update on a type mismatch or force it? 3884 uint32_t startIndex; 3885 startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate); 3886 // Layout bindings match w/ update, now verify that update type 3887 // & stageFlags are the same for entire update 3888 if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == false) { 3889 // The update is within bounds and consistent, but need to 3890 // make sure contents make sense as well 3891 if ((skipCall = validateUpdateContents(my_data, &pWDS[i], 3892 &pLayout->createInfo.pBindings[bindingToIndex->second])) == false) { 3893 // Update is good.
Save the update info 3894 // Create new update struct for this set's shadow copy 3895 GENERIC_HEADER *pNewNode = NULL; 3896 skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode); 3897 if (NULL == pNewNode) { 3898 skipCall |= log_msg( 3899 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3900 (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", 3901 "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptorSets()"); 3902 } else { 3903 // Insert shadow node into LL of updates for this set 3904 pNewNode->pNext = pSet->pUpdateStructs; 3905 pSet->pUpdateStructs = pNewNode; 3906 // Now update appropriate descriptor(s) to point to new Update node 3907 for (uint32_t j = startIndex; j <= endIndex; j++) { 3908 assert(j < pSet->descriptorCount); 3909 pSet->pDescriptorUpdates[j] = pNewNode; 3910 } 3911 } 3912 } 3913 } 3914 } 3915 } 3916 } 3917 // Now validate copy updates 3918 for (i = 0; i < descriptorCopyCount; ++i) { 3919 SET_NODE *pSrcSet = NULL, *pDstSet = NULL; 3920 LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL; 3921 uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0; 3922 // For each copy make sure that update falls within given layout and that types match 3923 pSrcSet = my_data->setMap[pCDS[i].srcSet]; 3924 pDstSet = my_data->setMap[pCDS[i].dstSet]; 3925 // Set being updated cannot be in-flight 3926 if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == true) 3927 return skipCall; 3928 invalidateBoundCmdBuffers(my_data, pDstSet); 3929 pSrcLayout = pSrcSet->pLayout; 3930 pDstLayout = pDstSet->pLayout; 3931 // Validate that src binding is valid for src set layout 3932 if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) { 3933 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3934 (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS", 3935 "Copy descriptor update %u has srcBinding %u " 3936 "which is out of bounds for underlying SetLayout " 3937 "%#" PRIxLEAST64 " which only has bindings 0-%u.", 3938 i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1); 3939 } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) { 3940 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3941 (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS", 3942 "Copy descriptor update %u has dstBinding %u " 3943 "which is out of bounds for underlying SetLayout " 3944 "%#" PRIxLEAST64 " which only has bindings 0-%u.", 3945 i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1); 3946 } else { 3947 // Proceed with validation.
Bindings are ok, but make sure update is within bounds of given layout 3948 srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement, 3949 (const GENERIC_HEADER *)&(pCDS[i])); 3950 dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement, 3951 (const GENERIC_HEADER *)&(pCDS[i])); 3952 if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) { 3953 pLayoutCI = &pSrcLayout->createInfo; 3954 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} "); 3955 skipCall |= 3956 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3957 (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS", 3958 "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!", 3959 pCDS[i].srcBinding, DSstr.c_str()); 3960 } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) { 3961 pLayoutCI = &pDstLayout->createInfo; 3962 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} "); 3963 skipCall |= 3964 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3965 (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS", 3966 "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!", 3967 pCDS[i].dstBinding, DSstr.c_str()); 3968 } else { 3969 srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement, 3970 (const GENERIC_HEADER *)&(pCDS[i])); 3971 dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement, 3972 (const GENERIC_HEADER *)&(pCDS[i])); 3973 for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) { 3974 // For copy just make sure that the types match and then perform the update 3975 if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) { 3976 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 3977 __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS", 3978 "Copy descriptor update index %u, update count #%u, has src update descriptor type %s " 3979 "that does not match overlapping dest descriptor type of %s!", 3980 i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]), 3981 string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j])); 3982 } else { 3983 // point dst descriptor at corresponding src descriptor 3984 // TODO : This may be a hole. I believe copy should be its own copy, 3985 // otherwise a subsequent write update to src will incorrectly affect the copy 3986 pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex]; 3987 pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs; 3988 } 3989 } 3990 } 3991 } 3992 } 3993 return skipCall; 3994} 3995 3996// Verify that given pool has descriptors that are being requested for allocation. 
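// As an illustrative example only (numbers invented for this comment, not taken from the spec or any test):
// a pool created with maxSets = 2 and one VkDescriptorPoolSize of { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 4 }
// can satisfy two vkAllocateDescriptorSets() requests whose layouts each consume two UNIFORM_BUFFER
// descriptors; a third set, or a layout requesting a fifth such descriptor, should trip the
// DRAWSTATE_DESCRIPTOR_POOL_EMPTY errors reported below.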
3997// NOTE : Calls to this function should be wrapped in mutex 3998static bool validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count, 3999 const VkDescriptorSetLayout *pSetLayouts) { 4000 bool skipCall = false; 4001 uint32_t i = 0; 4002 uint32_t j = 0; 4003 4004 // Track number of descriptorSets allowable in this pool 4005 if (pPoolNode->availableSets < count) { 4006 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 4007 reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS", 4008 "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64 4009 ". This pool only has %d descriptorSets remaining.", 4010 count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets); 4011 } else { 4012 pPoolNode->availableSets -= count; 4013 } 4014 4015 for (i = 0; i < count; ++i) { 4016 LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]); 4017 if (NULL == pLayout) { 4018 skipCall |= 4019 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 4020 (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 4021 "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call", 4022 (uint64_t)pSetLayouts[i]); 4023 } else { 4024 uint32_t typeIndex = 0, poolSizeCount = 0; 4025 for (j = 0; j < pLayout->createInfo.bindingCount; ++j) { 4026 typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType); 4027 poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount; 4028 if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) { 4029 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4030 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__, 4031 DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS", 4032 "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64 4033 ". 
This pool only has %d descriptors of this type remaining.", 4034 poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType), 4035 (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]); 4036 } else { // Decrement available descriptors of this type 4037 pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount; 4038 } 4039 } 4040 } 4041 } 4042 return skipCall; 4043} 4044 4045// Free the shadowed update node for this Set 4046// NOTE : Calls to this function should be wrapped in mutex 4047static void freeShadowUpdateTree(SET_NODE *pSet) { 4048 GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs; 4049 pSet->pUpdateStructs = NULL; 4050 GENERIC_HEADER *pFreeUpdate = pShadowUpdate; 4051 // Clear the descriptor mappings as they will now be invalid 4052 pSet->pDescriptorUpdates.clear(); 4053 while (pShadowUpdate) { 4054 pFreeUpdate = pShadowUpdate; 4055 pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext; 4056 VkWriteDescriptorSet *pWDS = NULL; 4057 switch (pFreeUpdate->sType) { 4058 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 4059 pWDS = (VkWriteDescriptorSet *)pFreeUpdate; 4060 switch (pWDS->descriptorType) { 4061 case VK_DESCRIPTOR_TYPE_SAMPLER: 4062 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: 4063 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: 4064 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { 4065 delete[] pWDS->pImageInfo; 4066 } break; 4067 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: 4068 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { 4069 delete[] pWDS->pTexelBufferView; 4070 } break; 4071 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: 4072 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: 4073 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: 4074 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { 4075 delete[] pWDS->pBufferInfo; 4076 } break; 4077 default: 4078 break; 4079 } 4080 break; 4081 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 4082 break; 4083 default: 4084 assert(0); 4085 break; 4086 } 4087 delete pFreeUpdate; 4088 } 4089} 4090 4091// Free all DS Pools including their Sets & related sub-structs 4092// NOTE : Calls to this function should be wrapped in mutex 4093static void deletePools(layer_data *my_data) { 4094 if (my_data->descriptorPoolMap.size() <= 0) 4095 return; 4096 for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) { 4097 SET_NODE *pSet = (*ii).second->pSets; 4098 SET_NODE *pFreeSet = pSet; 4099 while (pSet) { 4100 pFreeSet = pSet; 4101 pSet = pSet->pNext; 4102 // Freeing layouts handled in deleteLayouts() function 4103 // Free Update shadow struct tree 4104 freeShadowUpdateTree(pFreeSet); 4105 delete pFreeSet; 4106 } 4107 delete (*ii).second; 4108 } 4109 my_data->descriptorPoolMap.clear(); 4110} 4111 4112// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid 4113// NOTE : Calls to this function should be wrapped in mutex 4114static void deleteLayouts(layer_data *my_data) { 4115 if (my_data->descriptorSetLayoutMap.size() <= 0) 4116 return; 4117 for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) { 4118 LAYOUT_NODE *pLayout = (*ii).second; 4119 if (pLayout->createInfo.pBindings) { 4120 for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) { 4121 delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers; 4122 } 4123 delete[] pLayout->createInfo.pBindings; 4124 } 4125 delete pLayout; 4126 } 4127 my_data->descriptorSetLayoutMap.clear(); 4128} 4129 4130// Currently clearing a set 
removes all previous updates to that set 4131 // TODO : Validate if this is correct clearing behavior 4132 static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) { 4133 SET_NODE *pSet = getSetNode(my_data, set); 4134 if (!pSet) { 4135 // TODO : Return error 4136 } else { 4137 freeShadowUpdateTree(pSet); 4138 } 4139} 4140 4141static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool, 4142 VkDescriptorPoolResetFlags flags) { 4143 DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool); 4144 if (!pPool) { 4145 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 4146 (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS", 4147 "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool); 4148 } else { 4149 // TODO: validate flags 4150 // For every set off of this pool, clear it, remove from setMap, and free SET_NODE 4151 SET_NODE *pSet = pPool->pSets; 4152 SET_NODE *pFreeSet = pSet; 4153 while (pSet) { 4154 clearDescriptorSet(my_data, pSet->set); 4155 my_data->setMap.erase(pSet->set); 4156 pFreeSet = pSet; 4157 pSet = pSet->pNext; 4158 delete pFreeSet; 4159 } 4160 pPool->pSets = nullptr; 4161 // Reset available count for each type and available sets for this pool 4162 for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) { 4163 pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i]; 4164 } 4165 pPool->availableSets = pPool->maxSets; 4166 } 4167} 4168 4169// For given CB object, fetch associated CB Node from map 4170static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) { 4171 if (my_data->commandBufferMap.count(cb) == 0) { 4172 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4173 reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4174 "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb)); 4175 return NULL; 4176 } 4177 return my_data->commandBufferMap[cb]; 4178} 4179 4180// Free all CB Nodes 4181// NOTE : Calls to this function should be wrapped in mutex 4182static void deleteCommandBuffers(layer_data *my_data) { 4183 if (my_data->commandBufferMap.empty()) { 4184 return; 4185 } 4186 for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) { 4187 delete (*ii).second; 4188 } 4189 my_data->commandBufferMap.clear(); 4190} 4191 4192static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) { 4193 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4194 (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS", 4195 "You must call vkBeginCommandBuffer() before this call to %s", caller_name); 4196} 4197 4198bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) { 4199 if (!pCB->activeRenderPass) 4200 return false; 4201 bool skip_call = false; 4202 if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) { 4203 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4204 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4205 "Commands cannot be called in a subpass using secondary command buffers."); 4206 } else
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) { 4207 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4208 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4209 "vkCmdExecuteCommands() cannot be called in a subpass using inline commands."); 4210 } 4211 return skip_call; 4212} 4213 4214static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 4215 if (!(flags & VK_QUEUE_GRAPHICS_BIT)) 4216 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4217 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4218 "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name); 4219 return false; 4220} 4221 4222static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 4223 if (!(flags & VK_QUEUE_COMPUTE_BIT)) 4224 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4225 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4226 "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name); 4227 return false; 4228} 4229 4230static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 4231 if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT))) 4232 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4233 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4234 "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name); 4235 return false; 4236} 4237 4238// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not 4239// in the recording state or if there's an issue with the Cmd ordering 4240static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) { 4241 bool skipCall = false; 4242 auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool); 4243 if (pool_data != my_data->commandPoolMap.end()) { 4244 VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags; 4245 switch (cmd) { 4246 case CMD_BINDPIPELINE: 4247 case CMD_BINDPIPELINEDELTA: 4248 case CMD_BINDDESCRIPTORSETS: 4249 case CMD_FILLBUFFER: 4250 case CMD_CLEARCOLORIMAGE: 4251 case CMD_SETEVENT: 4252 case CMD_RESETEVENT: 4253 case CMD_WAITEVENTS: 4254 case CMD_BEGINQUERY: 4255 case CMD_ENDQUERY: 4256 case CMD_RESETQUERYPOOL: 4257 case CMD_COPYQUERYPOOLRESULTS: 4258 case CMD_WRITETIMESTAMP: 4259 skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str()); 4260 break; 4261 case CMD_SETVIEWPORTSTATE: 4262 case CMD_SETSCISSORSTATE: 4263 case CMD_SETLINEWIDTHSTATE: 4264 case CMD_SETDEPTHBIASSTATE: 4265 case CMD_SETBLENDSTATE: 4266 case CMD_SETDEPTHBOUNDSSTATE: 4267 case CMD_SETSTENCILREADMASKSTATE: 4268 case CMD_SETSTENCILWRITEMASKSTATE: 4269 case CMD_SETSTENCILREFERENCESTATE: 4270 case CMD_BINDINDEXBUFFER: 4271 case CMD_BINDVERTEXBUFFER: 4272 case CMD_DRAW: 4273 case CMD_DRAWINDEXED: 4274 case CMD_DRAWINDIRECT: 4275 case CMD_DRAWINDEXEDINDIRECT: 4276 case CMD_BLITIMAGE: 4277 case CMD_CLEARATTACHMENTS: 4278 case CMD_CLEARDEPTHSTENCILIMAGE: 4279 case CMD_RESOLVEIMAGE: 4280 case CMD_BEGINRENDERPASS: 4281 case CMD_NEXTSUBPASS: 4282 case CMD_ENDRENDERPASS: 4283 skipCall |= checkGraphicsBit(my_data,
flags, cmdTypeToString(cmd).c_str()); 4284 break; 4285 case CMD_DISPATCH: 4286 case CMD_DISPATCHINDIRECT: 4287 skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str()); 4288 break; 4289 case CMD_COPYBUFFER: 4290 case CMD_COPYIMAGE: 4291 case CMD_COPYBUFFERTOIMAGE: 4292 case CMD_COPYIMAGETOBUFFER: 4293 case CMD_CLONEIMAGEDATA: 4294 case CMD_UPDATEBUFFER: 4295 case CMD_PIPELINEBARRIER: 4296 case CMD_EXECUTECOMMANDS: 4297 break; 4298 default: 4299 break; 4300 } 4301 } 4302 if (pCB->state != CB_RECORDING) { 4303 skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name); 4304 } else { skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd); // only record the cmd while CB is in the recording state 4305 CMD_NODE cmdNode = {}; 4306 // init cmd node and append to end of cmd LL 4307 cmdNode.cmdNumber = ++pCB->numCmds; 4308 cmdNode.type = cmd; 4309 pCB->cmds.push_back(cmdNode); 4310 } 4311 return skipCall; 4312} 4313// Reset the command buffer state 4314// Maintain the createInfo and set state to CB_NEW, but clear all other state 4315static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) { 4316 GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb]; 4317 if (pCB) { 4318 pCB->cmds.clear(); 4319 // Reset CB state (note that createInfo is not cleared) 4320 pCB->commandBuffer = cb; 4321 memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo)); 4322 memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo)); 4323 pCB->numCmds = 0; 4324 memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t)); 4325 pCB->state = CB_NEW; 4326 pCB->submitCount = 0; 4327 pCB->status = 0; 4328 pCB->viewports.clear(); 4329 pCB->scissors.clear(); 4330 4331 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 4332 // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets 4333 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 4334 auto set_node = dev_data->setMap.find(set); 4335 if (set_node != dev_data->setMap.end()) { 4336 set_node->second->boundCmdBuffers.erase(pCB->commandBuffer); 4337 } 4338 } 4339 pCB->lastBound[i].reset(); 4340 } 4341 4342 memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo)); 4343 pCB->activeRenderPass = 0; 4344 pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE; 4345 pCB->activeSubpass = 0; 4346 pCB->lastSubmittedFence = VK_NULL_HANDLE; 4347 pCB->lastSubmittedQueue = VK_NULL_HANDLE; 4348 pCB->destroyedSets.clear(); 4349 pCB->updatedSets.clear(); 4350 pCB->destroyedFramebuffers.clear(); 4351 pCB->waitedEvents.clear(); 4352 pCB->semaphores.clear(); 4353 pCB->events.clear(); 4354 pCB->waitedEventsBeforeQueryReset.clear(); 4355 pCB->queryToStateMap.clear(); 4356 pCB->activeQueries.clear(); 4357 pCB->startedQueries.clear(); 4358 pCB->imageSubresourceMap.clear(); 4359 pCB->imageLayoutMap.clear(); 4360 pCB->eventToStageMap.clear(); 4361 pCB->drawData.clear(); 4362 pCB->currentDrawData.buffers.clear(); 4363 pCB->primaryCommandBuffer = VK_NULL_HANDLE; 4364 // Make sure any secondaryCommandBuffers are removed from globalInFlight 4365 for (auto secondary_cb : pCB->secondaryCommandBuffers) { 4366 dev_data->globalInFlightCmdBuffers.erase(secondary_cb); 4367 } 4368 pCB->secondaryCommandBuffers.clear(); 4369 pCB->updateImages.clear(); 4370 pCB->updateBuffers.clear(); 4371 clear_cmd_buf_and_mem_references(dev_data, pCB); 4372 pCB->eventUpdates.clear(); 4373 4374 // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list 4375 for (auto framebuffer : pCB->framebuffers) { 4376 auto fbNode =
dev_data->frameBufferMap.find(framebuffer); 4377 if (fbNode != dev_data->frameBufferMap.end()) { 4378 fbNode->second.referencingCmdBuffers.erase(pCB->commandBuffer); 4379 } 4380 } 4381 pCB->framebuffers.clear(); 4382 4383 } 4384} 4385 4386// Set PSO-related status bits for CB, including dynamic state set via PSO 4387static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) { 4388 // Account for any dynamic state not set via this PSO 4389 if (!pPipe->graphicsPipelineCI.pDynamicState || 4390 !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static 4391 pCB->status = CBSTATUS_ALL; 4392 } else { 4393 // First consider all state on 4394 // Then unset any state that's noted as dynamic in PSO 4395 // Finally OR that into CB statemask 4396 CBStatusFlags psoDynStateMask = CBSTATUS_ALL; 4397 for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { 4398 switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) { 4399 case VK_DYNAMIC_STATE_VIEWPORT: 4400 psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET; 4401 break; 4402 case VK_DYNAMIC_STATE_SCISSOR: 4403 psoDynStateMask &= ~CBSTATUS_SCISSOR_SET; 4404 break; 4405 case VK_DYNAMIC_STATE_LINE_WIDTH: 4406 psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET; 4407 break; 4408 case VK_DYNAMIC_STATE_DEPTH_BIAS: 4409 psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET; 4410 break; 4411 case VK_DYNAMIC_STATE_BLEND_CONSTANTS: 4412 psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET; 4413 break; 4414 case VK_DYNAMIC_STATE_DEPTH_BOUNDS: 4415 psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET; 4416 break; 4417 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK: 4418 psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET; 4419 break; 4420 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK: 4421 psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET; 4422 break; 4423 case VK_DYNAMIC_STATE_STENCIL_REFERENCE: 4424 psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET; 4425 break; 4426 default: 4427 // TODO : Flag error here 4428 break; 4429 } 4430 } 4431 pCB->status |= psoDynStateMask; 4432 } 4433} 4434 4435// Print the last bound Gfx Pipeline 4436static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) { 4437 bool skipCall = false; 4438 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb); 4439 if (pCB) { 4440 PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline); 4441 if (!pPipeTrav) { 4442 // nothing to print 4443 } else { 4444 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 4445 __LINE__, DRAWSTATE_NONE, "DS", "%s", 4446 vk_print_vkgraphicspipelinecreateinfo( 4447 reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}") 4448 .c_str()); 4449 } 4450 } 4451 return skipCall; 4452} 4453 4454static void printCB(layer_data *my_data, const VkCommandBuffer cb) { 4455 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb); 4456 if (pCB && pCB->cmds.size() > 0) { 4457 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4458 DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb); 4459 vector<CMD_NODE> cmds = pCB->cmds; 4460 for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) { 4461 // TODO : Need to pass cb as srcObj here 4462 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 4463 __LINE__, DRAWSTATE_NONE, "DS", " CMD#%" PRIu64 ": %s", (*ii).cmdNumber, 
cmdTypeToString((*ii).type).c_str()); 4464 } 4465 } else { 4466 // Nothing to print 4467 } 4468} 4469 4470static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) { 4471 bool skipCall = false; 4472 if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) { 4473 return skipCall; 4474 } 4475 skipCall |= printPipeline(my_data, cb); 4476 return skipCall; 4477} 4478 4479// Flags validation error if the associated call is made inside a render pass. The apiName 4480// routine should ONLY be called outside a render pass. 4481static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) { 4482 bool inside = false; 4483 if (pCB->activeRenderPass) { 4484 inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4485 (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS", 4486 "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName, 4487 (uint64_t)pCB->activeRenderPass); 4488 } 4489 return inside; 4490} 4491 4492// Flags validation error if the associated call is made outside a render pass. The apiName 4493// routine should ONLY be called inside a render pass. 4494static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) { 4495 bool outside = false; 4496 if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) || 4497 ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) && 4498 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) { 4499 outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4500 (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS", 4501 "%s: This call must be issued inside an active render pass.", apiName); 4502 } 4503 return outside; 4504} 4505 4506static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) { 4507 4508 layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation"); 4509 4510} 4511 4512VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4513vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) { 4514 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); 4515 4516 assert(chain_info->u.pLayerInfo); 4517 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; 4518 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance"); 4519 if (fpCreateInstance == NULL) 4520 return VK_ERROR_INITIALIZATION_FAILED; 4521 4522 // Advance the link info for the next element on the chain 4523 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; 4524 4525 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance); 4526 if (result != VK_SUCCESS) 4527 return result; 4528 4529 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map); 4530 instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable; 4531 layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr); 4532 4533 instance_data->report_data = 4534 
debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, 4535 pCreateInfo->ppEnabledExtensionNames); 4536 4537 init_core_validation(instance_data, pAllocator); 4538 4539 ValidateLayerOrdering(*pCreateInfo); 4540 4541 return result; 4542} 4543 4544/* hook DestroyInstance to remove tableInstanceMap entry */ 4545VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { 4546 // TODOSC : Shouldn't need any customization here 4547 dispatch_key key = get_dispatch_key(instance); 4548 // TBD: Need any locking this early, in case this function is called at the 4549 // same time by more than one thread? 4550 layer_data *my_data = get_my_data_ptr(key, layer_data_map); 4551 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 4552 pTable->DestroyInstance(instance, pAllocator); 4553 4554 std::lock_guard<std::mutex> lock(global_lock); 4555 // Clean up logging callback, if any 4556 while (my_data->logging_callback.size() > 0) { 4557 VkDebugReportCallbackEXT callback = my_data->logging_callback.back(); 4558 layer_destroy_msg_callback(my_data->report_data, callback, pAllocator); 4559 my_data->logging_callback.pop_back(); 4560 } 4561 4562 layer_debug_report_destroy_instance(my_data->report_data); 4563 delete my_data->instance_dispatch_table; 4564 layer_data_map.erase(key); 4565} 4566 4567static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) { 4568 uint32_t i; 4569 // TBD: Need any locking, in case this function is called at the same time 4570 // by more than one thread? 4571 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4572 dev_data->device_extensions.wsi_enabled = false; 4573 4574 VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table; 4575 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr; 4576 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR"); 4577 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR"); 4578 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR"); 4579 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR"); 4580 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR"); 4581 4582 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) { 4583 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) 4584 dev_data->device_extensions.wsi_enabled = true; 4585 } 4586} 4587 4588VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, 4589 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { 4590 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); 4591 4592 assert(chain_info->u.pLayerInfo); 4593 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; 4594 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr; 4595 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice"); 4596 if (fpCreateDevice == NULL) { 4597 return VK_ERROR_INITIALIZATION_FAILED; 4598 } 4599 4600 // Advance the link info for the next element on the chain 4601 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; 4602 4603 VkResult 
result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice); 4604 if (result != VK_SUCCESS) { 4605 return result; 4606 } 4607 4608 std::unique_lock<std::mutex> lock(global_lock); 4609 layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map); 4610 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map); 4611 4612 // Setup device dispatch table 4613 my_device_data->device_dispatch_table = new VkLayerDispatchTable; 4614 layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr); 4615 my_device_data->device = *pDevice; 4616 4617 my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice); 4618 createDeviceRegisterExtensions(pCreateInfo, *pDevice); 4619 // Get physical device limits for this device 4620 my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties)); 4621 uint32_t count; 4622 my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr); 4623 my_device_data->phys_dev_properties.queue_family_properties.resize(count); 4624 my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties( 4625 gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]); 4626 // TODO: device limits should make sure these are compatible 4627 if (pCreateInfo->pEnabledFeatures) { 4628 my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures; 4629 } else { 4630 memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures)); 4631 } 4632 // Store physical device mem limits into device layer_data struct 4633 my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props); 4634 lock.unlock(); 4635 4636 ValidateLayerOrdering(*pCreateInfo); 4637 4638 return result; 4639} 4640 4641// prototype 4642static void deleteRenderPasses(layer_data *); 4643VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { 4644 // TODOSC : Shouldn't need any customization here 4645 dispatch_key key = get_dispatch_key(device); 4646 layer_data *dev_data = get_my_data_ptr(key, layer_data_map); 4647 // Free all the memory 4648 std::unique_lock<std::mutex> lock(global_lock); 4649 deletePipelines(dev_data); 4650 deleteRenderPasses(dev_data); 4651 deleteCommandBuffers(dev_data); 4652 deletePools(dev_data); 4653 deleteLayouts(dev_data); 4654 dev_data->imageViewMap.clear(); 4655 dev_data->imageMap.clear(); 4656 dev_data->imageSubresourceMap.clear(); 4657 dev_data->imageLayoutMap.clear(); 4658 dev_data->bufferViewMap.clear(); 4659 dev_data->bufferMap.clear(); 4660 // Queues persist until device is destroyed 4661 dev_data->queueMap.clear(); 4662 lock.unlock(); 4663#if MTMERGESOURCE 4664 bool skipCall = false; 4665 lock.lock(); 4666 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 4667 (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()"); 4668 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 4669 (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================"); 4670 print_mem_list(dev_data); 4671 printCBList(dev_data); 4672 // Report any memory leaks 4673 DEVICE_MEM_INFO *pInfo = NULL; 4674 
if (!dev_data->memObjMap.empty()) { 4675 for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) { 4676 pInfo = &(*ii).second; 4677 if (pInfo->allocInfo.allocationSize != 0) { 4678 // Valid Usage: All child objects created on device must have been destroyed prior to destroying device 4679 skipCall |= 4680 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4681 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, 4682 "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling " 4683 "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().", 4684 (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem)); 4685 } 4686 } 4687 } 4688 layer_debug_report_destroy_device(device); 4689 lock.unlock(); 4690 4691#if DISPATCH_MAP_DEBUG 4692 fprintf(stderr, "Device: %p, key: %p\n", device, key); 4693#endif 4694 VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table; 4695 if (!skipCall) { 4696 pDisp->DestroyDevice(device, pAllocator); 4697 } 4698#else 4699 dev_data->device_dispatch_table->DestroyDevice(device, pAllocator); 4700#endif 4701 delete dev_data->device_dispatch_table; 4702 layer_data_map.erase(key); 4703} 4704 4705static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}}; 4706 4707VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4708vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) { 4709 return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties); 4710} 4711 4712VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4713vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) { 4714 return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties); 4715} 4716 4717// TODO: Why does this exist - can we just use global? 
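// (It appears to duplicate cv_global_layers entry-for-entry; presumably the separate table exists only
// so vkEnumerateDeviceLayerProperties() below has a device-scoped array to report from.)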
4718static const VkLayerProperties cv_device_layers[] = {{ 4719 "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer", 4720}}; 4721 4722VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, 4723 const char *pLayerName, uint32_t *pCount, 4724 VkExtensionProperties *pProperties) { 4725 if (pLayerName == NULL) { 4726 dispatch_key key = get_dispatch_key(physicalDevice); 4727 layer_data *my_data = get_my_data_ptr(key, layer_data_map); 4728 return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties); 4729 } else { 4730 return util_GetExtensionProperties(0, NULL, pCount, pProperties); 4731 } 4732} 4733 4734VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4735vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) { 4736 /* draw_state physical device layers are the same as global */ 4737 return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties); 4738} 4739 4740// This validates that the initial layout specified in the command buffer for 4741// the IMAGE is the same 4742// as the global IMAGE layout 4743static bool ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) { 4744 bool skip_call = false; 4745 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 4746 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 4747 for (auto cb_image_data : pCB->imageLayoutMap) { 4748 VkImageLayout imageLayout; 4749 if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) { 4750 skip_call |= 4751 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 4752 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".", 4753 reinterpret_cast<const uint64_t &>(cb_image_data.first)); 4754 } else { 4755 if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) { 4756 // TODO: Set memory invalid which is in mem_tracker currently 4757 } else if (imageLayout != cb_image_data.second.initialLayout) { 4758 if (cb_image_data.first.hasSubresource) { 4759 skip_call |= log_msg( 4760 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4761 reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 4762 "Cannot submit cmd buffer using image (%" PRIx64 ") [sub-resource: array layer %u, mip level %u], " 4763 "with layout %s when first use is %s.", 4764 reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.arrayLayer, 4765 cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout), 4766 string_VkImageLayout(cb_image_data.second.initialLayout)); 4767 } else { 4768 skip_call |= log_msg( 4769 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4770 reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 4771 "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when " 4772 "first use is %s.", 4773 reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout), 4774 string_VkImageLayout(cb_image_data.second.initialLayout)); 4775 } 4776 } 4777 SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout); 4778 } 4779 } 4780 return skip_call; 4781} 4782 4783// Track 
which resources are in-flight by atomically incrementing their "in_use" count 4784 static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) { 4785 bool skip_call = false; 4786 for (auto drawDataElement : pCB->drawData) { 4787 for (auto buffer : drawDataElement.buffers) { 4788 auto buffer_data = my_data->bufferMap.find(buffer); 4789 if (buffer_data == my_data->bufferMap.end()) { 4790 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 4791 (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS", 4792 "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer)); 4793 } else { 4794 buffer_data->second.in_use.fetch_add(1); 4795 } 4796 } 4797 } 4798 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 4799 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 4800 auto setNode = my_data->setMap.find(set); 4801 if (setNode == my_data->setMap.end()) { 4802 skip_call |= 4803 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 4804 (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS", 4805 "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set)); 4806 } else { 4807 setNode->second->in_use.fetch_add(1); 4808 } 4809 } 4810 } 4811 for (auto semaphore : pCB->semaphores) { 4812 auto semaphoreNode = my_data->semaphoreMap.find(semaphore); 4813 if (semaphoreNode == my_data->semaphoreMap.end()) { 4814 skip_call |= 4815 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 4816 reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS", 4817 "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore)); 4818 } else { 4819 semaphoreNode->second.in_use.fetch_add(1); 4820 } 4821 } 4822 for (auto event : pCB->events) { 4823 auto eventNode = my_data->eventMap.find(event); 4824 if (eventNode == my_data->eventMap.end()) { 4825 skip_call |= 4826 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, 4827 reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS", 4828 "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event)); 4829 } else { 4830 eventNode->second.in_use.fetch_add(1); 4831 } 4832 } 4833 return skip_call; 4834} 4835 4836static void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) { 4837 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer); 4838 for (auto drawDataElement : pCB->drawData) { 4839 for (auto buffer : drawDataElement.buffers) { 4840 auto buffer_data = my_data->bufferMap.find(buffer); 4841 if (buffer_data != my_data->bufferMap.end()) { 4842 buffer_data->second.in_use.fetch_sub(1); 4843 } 4844 } 4845 } 4846 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 4847 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 4848 auto setNode = my_data->setMap.find(set); 4849 if (setNode != my_data->setMap.end()) { 4850 setNode->second->in_use.fetch_sub(1); 4851 } 4852 } 4853 } 4854 for (auto semaphore : pCB->semaphores) { 4855 auto semaphoreNode = my_data->semaphoreMap.find(semaphore); 4856 if (semaphoreNode != my_data->semaphoreMap.end()) { 4857 semaphoreNode->second.in_use.fetch_sub(1); 4858 } 4859 } 4860 for (auto event : pCB->events) { 4861 auto eventNode = my_data->eventMap.find(event); 4862 if (eventNode !=
my_data->eventMap.end()) { 4863 eventNode->second.in_use.fetch_sub(1); 4864 } 4865 } 4866 for (auto queryStatePair : pCB->queryToStateMap) { 4867 my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second; 4868 } 4869 for (auto eventStagePair : pCB->eventToStageMap) { 4870 my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second; 4871 } 4872} 4873// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call 4874// decrementResources for all priorFences and cmdBuffers associated with fence. 4875static void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) { 4876 for (uint32_t i = 0; i < fenceCount; ++i) { 4877 auto fence_data = my_data->fenceMap.find(pFences[i]); 4878 if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled) 4879 continue; // skip this fence but keep processing the remaining fences 4880 fence_data->second.needsSignaled = false; 4881 fence_data->second.in_use.fetch_sub(1); 4882 decrementResources(my_data, static_cast<uint32_t>(fence_data->second.priorFences.size()), 4883 fence_data->second.priorFences.data()); 4884 for (auto cmdBuffer : fence_data->second.cmdBuffers) { 4885 decrementResources(my_data, cmdBuffer); 4886 } 4887 } 4888} 4889// Decrement in_use for all outstanding cmd buffers that were submitted on this queue 4890static void decrementResources(layer_data *my_data, VkQueue queue) { 4891 auto queue_data = my_data->queueMap.find(queue); 4892 if (queue_data != my_data->queueMap.end()) { 4893 for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) { 4894 decrementResources(my_data, cmdBuffer); 4895 } 4896 queue_data->second.untrackedCmdBuffers.clear(); 4897 decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()), 4898 queue_data->second.lastFences.data()); 4899 } 4900} 4901 4902static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) { 4903 if (queue == other_queue) { 4904 return; 4905 } 4906 auto queue_data = dev_data->queueMap.find(queue); 4907 auto other_queue_data = dev_data->queueMap.find(other_queue); 4908 if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) { 4909 return; 4910 } 4911 for (auto fenceInner : other_queue_data->second.lastFences) { 4912 queue_data->second.lastFences.push_back(fenceInner); 4913 } 4914 if (fence != VK_NULL_HANDLE) { 4915 auto fence_data = dev_data->fenceMap.find(fence); 4916 if (fence_data == dev_data->fenceMap.end()) { 4917 return; 4918 } 4919 for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) { 4920 fence_data->second.cmdBuffers.push_back(cmdbuffer); 4921 } 4922 other_queue_data->second.untrackedCmdBuffers.clear(); 4923 } else { 4924 for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) { 4925 queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer); 4926 } 4927 other_queue_data->second.untrackedCmdBuffers.clear(); 4928 } 4929 for (auto eventStagePair : other_queue_data->second.eventToStageMap) { 4930 queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second; 4931 } 4932} 4933 4934static void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, 4935 VkFence fence) { 4936 auto queue_data = my_data->queueMap.find(queue); 4937 if (fence != VK_NULL_HANDLE) { 4938 vector<VkFence> prior_fences; 4939 auto fence_data = my_data->fenceMap.find(fence); 4940 if (fence_data == my_data->fenceMap.end()) { 4941 return; 4942 } 4943
fence_data->second.cmdBuffers.clear(); 4944 if (queue_data != my_data->queueMap.end()) { 4945 prior_fences = queue_data->second.lastFences; 4946 queue_data->second.lastFences.clear(); 4947 queue_data->second.lastFences.push_back(fence); 4948 for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) { 4949 fence_data->second.cmdBuffers.push_back(cmdbuffer); 4950 } 4951 queue_data->second.untrackedCmdBuffers.clear(); 4952 } 4953 fence_data->second.priorFences = prior_fences; 4954 fence_data->second.needsSignaled = true; 4955 fence_data->second.queue = queue; 4956 fence_data->second.in_use.fetch_add(1); 4957 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 4958 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 4959 for (uint32_t i = 0; i < submit->commandBufferCount; ++i) { 4960 for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) { 4961 fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer); 4962 } 4963 fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]); 4964 } 4965 } 4966 } else { 4967 if (queue_data != my_data->queueMap.end()) { 4968 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 4969 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 4970 for (uint32_t i = 0; i < submit->commandBufferCount; ++i) { 4971 for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) { 4972 queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer); 4973 } 4974 queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]); 4975 } 4976 } 4977 } 4978 } 4979 if (queue_data != my_data->queueMap.end()) { 4980 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 4981 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 4982 for (uint32_t i = 0; i < submit->commandBufferCount; ++i) { 4983 // Add cmdBuffers to both the global set and queue set 4984 for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) { 4985 my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer); 4986 queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer); 4987 } 4988 my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]); 4989 queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]); 4990 } 4991 } 4992 } 4993} 4994 4995static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 4996 bool skip_call = false; 4997 if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) && 4998 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { 4999 skip_call |= 5000 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 5001 __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS", 5002 "Command Buffer %#" PRIx64 " is already in use and is not marked for simultaneous use.", 5003 reinterpret_cast<uint64_t>(pCB->commandBuffer)); 5004 } 5005 return skip_call; 5006} 5007 5008static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 5009 bool skipCall = false; 5010 // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once 5011 if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) { 5012 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 5013 __LINE__, 
DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS", 5014 "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT " 5015 "set, but has been submitted %" PRIu64 " times.", 5016 (uint64_t)(pCB->commandBuffer), pCB->submitCount); 5017 } 5018 // Validate that cmd buffers have been updated 5019 if (CB_RECORDED != pCB->state) { 5020 if (CB_INVALID == pCB->state) { 5021 // Inform app of reason CB invalid 5022 bool causeReported = false; 5023 if (!pCB->destroyedSets.empty()) { 5024 std::stringstream set_string; 5025 for (auto set : pCB->destroyedSets) 5026 set_string << " " << set; 5027 5028 skipCall |= 5029 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5030 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 5031 "You are submitting command buffer %#" PRIxLEAST64 5032 " that is invalid because it had the following bound descriptor set(s) destroyed: %s", 5033 (uint64_t)(pCB->commandBuffer), set_string.str().c_str()); 5034 causeReported = true; 5035 } 5036 if (!pCB->updatedSets.empty()) { 5037 std::stringstream set_string; 5038 for (auto set : pCB->updatedSets) 5039 set_string << " " << set; 5040 5041 skipCall |= 5042 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5043 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 5044 "You are submitting command buffer %#" PRIxLEAST64 5045 " that is invalid because it had the following bound descriptor set(s) updated: %s", 5046 (uint64_t)(pCB->commandBuffer), set_string.str().c_str()); 5047 causeReported = true; 5048 } 5049 if (!pCB->destroyedFramebuffers.empty()) { 5050 std::stringstream fb_string; 5051 for (auto fb : pCB->destroyedFramebuffers) 5052 fb_string << " " << fb; 5053 5054 skipCall |= 5055 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5056 reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 5057 "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following " 5058 "referenced framebuffers destroyed: %s", 5059 reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str()); 5060 causeReported = true; 5061 } 5062 // TODO : This is defensive programming to make sure an error is 5063 // flagged if we hit this INVALID cmd buffer case and none of the 5064 // above cases are hit. As the number of INVALID cases grows, this 5065 // code should be updated to seamlessly handle all the cases. 5066 if (!causeReported) { 5067 skipCall |= log_msg( 5068 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5069 reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 5070 "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. 
Validation " 5071 "should " 5072 "be improved to report the exact cause.", 5073 reinterpret_cast<uint64_t &>(pCB->commandBuffer)); 5074 } 5075 } else { // Flag error for using CB w/o vkEndCommandBuffer() called 5076 skipCall |= 5077 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5078 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS", 5079 "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!", 5080 (uint64_t)(pCB->commandBuffer)); 5081 } 5082 } 5083 return skipCall; 5084} 5085 5086static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 5087 // Track in-use for resources off of primary and any secondary CBs 5088 bool skipCall = validateAndIncrementResources(dev_data, pCB); 5089 if (!pCB->secondaryCommandBuffers.empty()) { 5090 for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) { 5091 skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]); 5092 GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer); 5093 if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) { 5094 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 5095 __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS", 5096 "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64 5097 " but that buffer has subsequently been bound to " 5098 "primary cmd buffer %#" PRIxLEAST64 ".", 5099 reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer), 5100 reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer)); 5101 } 5102 } 5103 } 5104 skipCall |= validateCommandBufferState(dev_data, pCB); 5105 // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing 5106 // on device 5107 skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB); 5108 return skipCall; 5109} 5110 5111VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 5112vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) { 5113 bool skipCall = false; 5114 GLOBAL_CB_NODE *pCBNode = NULL; 5115 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 5116 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 5117 std::unique_lock<std::mutex> lock(global_lock); 5118 // First verify that fence is not in use 5119 if (fence != VK_NULL_HANDLE) { 5120 dev_data->fenceMap[fence].queue = queue; 5121 if ((submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) { 5122 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5123 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", 5124 "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence)); 5125 } 5126 if (!dev_data->fenceMap[fence].needsSignaled) { 5127 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5128 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM", 5129 "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. 
Fences must be reset before being submitted", 5130 reinterpret_cast<uint64_t &>(fence)); 5131 } 5132 } 5133 // TODO : Review these old print functions and clean up as appropriate 5134 print_mem_list(dev_data); 5135 printCBList(dev_data); 5136 // Now verify each individual submit 5137 std::unordered_set<VkQueue> processed_other_queues; 5138 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 5139 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 5140 vector<VkSemaphore> semaphoreList; 5141 for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { 5142 const VkSemaphore &semaphore = submit->pWaitSemaphores[i]; 5143 semaphoreList.push_back(semaphore); 5144 if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) { 5145 if (dev_data->semaphoreMap[semaphore].signaled) { 5146 dev_data->semaphoreMap[semaphore].signaled = false; 5147 } else { 5148 skipCall |= 5149 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 5150 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 5151 "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.", 5152 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore)); 5153 } 5154 const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue; 5155 if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) { 5156 updateTrackedCommandBuffers(dev_data, queue, other_queue, fence); 5157 processed_other_queues.insert(other_queue); 5158 } 5159 } 5160 } 5161 for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { 5162 const VkSemaphore &semaphore = submit->pSignalSemaphores[i]; 5163 if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) { 5164 semaphoreList.push_back(semaphore); 5165 if (dev_data->semaphoreMap[semaphore].signaled) { 5166 skipCall |= 5167 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 5168 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 5169 "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64 5170 " that has already been signaled but not waited on by queue %#" PRIx64 ".", 5171 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore), 5172 reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue)); 5173 } else { 5174 dev_data->semaphoreMap[semaphore].signaled = true; 5175 dev_data->semaphoreMap[semaphore].queue = queue; 5176 } 5177 } 5178 } 5179 for (uint32_t i = 0; i < submit->commandBufferCount; i++) { 5180 skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]); 5181 pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]); 5182 if (pCBNode) { 5183 pCBNode->semaphores = semaphoreList; 5184 pCBNode->submitCount++; // increment submit count 5185 pCBNode->lastSubmittedFence = fence; 5186 pCBNode->lastSubmittedQueue = queue; 5187 skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode); 5188 // Call submit-time functions to validate/update state 5189 for (auto &function : pCBNode->validate_functions) { 5190 skipCall |= function(); 5191 } 5192 for (auto &function : pCBNode->eventUpdates) { 5193 skipCall |= function(queue); 5194 } 5195 } 5196 } 5197 } 5198 // Update cmdBuffer-related data structs and mark fence in-use 5199 trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence); 5200 lock.unlock(); 5201 if (!skipCall) 5202 result = 
dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence); 5203 5204 return result; 5205} 5206 5207#if MTMERGESOURCE 5208VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, 5209 const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) { 5210 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5211 VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory); 5212 // TODO : Track allocations and overall size here 5213 std::lock_guard<std::mutex> lock(global_lock); 5214 add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo); 5215 print_mem_list(my_data); 5216 return result; 5217} 5218 5219VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5220vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) { 5221 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5222 5223 // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed. 5224 // Before freeing a memory object, an application must ensure the memory object is no longer 5225 // in use by the device—for example by command buffers queued for execution. The memory need 5226 // not yet be unbound from all images and buffers, but any further use of those images or 5227 // buffers (on host or device) for anything other than destroying those objects will result in 5228 // undefined behavior. 5229 5230 std::unique_lock<std::mutex> lock(global_lock); 5231 freeMemObjInfo(my_data, device, mem, false); 5232 print_mem_list(my_data); 5233 printCBList(my_data); 5234 lock.unlock(); 5235 my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator); 5236} 5237 5238static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { 5239 bool skipCall = false; 5240 5241 if (size == 0) { 5242 // TODO: a size of 0 is not listed as an invalid use in the spec, should it be? 
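// Illustrative sketch, not layer code: the application call being validated below is
// vkMapMemory. A zero-size request, which triggers the warning issued below, would look
// like the following (where `memory` is assumed to be a host-visible VkDeviceMemory
// allocation):
//     void *pData = nullptr;
//     VkResult err = vkMapMemory(device, memory, /*offset*/ 0, /*size*/ 0, /*flags*/ 0, &pData);
// Passing VK_WHOLE_SIZE instead maps from `offset` to the end of the allocation.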
5243 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5244 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 5245 "VkMapMemory: Attempting to map memory range of size zero"); 5246 } 5247 5248 auto mem_element = my_data->memObjMap.find(mem); 5249 if (mem_element != my_data->memObjMap.end()) { 5250 // It is an application error to call VkMapMemory on an object that is already mapped 5251 if (mem_element->second.memRange.size != 0) { 5252 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5253 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 5254 "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem); 5255 } 5256 5257 // Validate that offset + size is within object's allocationSize 5258 if (size == VK_WHOLE_SIZE) { 5259 if (offset >= mem_element->second.allocInfo.allocationSize) { 5260 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5261 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, 5262 "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64, offset, 5263 mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize); 5264 } 5265 } else { 5266 if ((offset + size) > mem_element->second.allocInfo.allocationSize) { 5267 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5268 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, 5269 "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64, offset, 5270 size + offset, mem_element->second.allocInfo.allocationSize); 5271 } 5272 } 5273 } 5274 return skipCall; 5275} 5276 5277static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { 5278 auto mem_element = my_data->memObjMap.find(mem); 5279 if (mem_element != my_data->memObjMap.end()) { 5280 MemRange new_range; 5281 new_range.offset = offset; 5282 new_range.size = size; 5283 mem_element->second.memRange = new_range; 5284 } 5285} 5286 5287static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) { 5288 bool skipCall = false; 5289 auto mem_element = my_data->memObjMap.find(mem); 5290 if (mem_element != my_data->memObjMap.end()) { 5291 if (!mem_element->second.memRange.size) { 5292 // Valid Usage: memory must currently be mapped 5293 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5294 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 5295 "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem); 5296 } 5297 mem_element->second.memRange.size = 0; 5298 if (mem_element->second.pData) { 5299 free(mem_element->second.pData); 5300 mem_element->second.pData = 0; 5301 } 5302 } 5303 return skipCall; 5304} 5305 5306static char NoncoherentMemoryFillValue = 0xb; 5307 5308static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) { 5309 auto mem_element = dev_data->memObjMap.find(mem); 5310 if (mem_element != dev_data->memObjMap.end()) { 5311 mem_element->second.pDriverData = *ppData; 5312 uint32_t index = mem_element->second.allocInfo.memoryTypeIndex; 5313 if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) { 5314 
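// Host-coherent case: the layer keeps no shadow copy, so pData stays null below and the
// driver's mapped pointer is returned to the app untouched. The non-coherent path in the
// else branch instead returns a pointer into the middle of a double-sized scratch
// allocation pre-filled with NoncoherentMemoryFillValue, so writes that stray outside
// the requested range can later be detected.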
mem_element->second.pData = 0; 5315 } else { 5316 if (size == VK_WHOLE_SIZE) { 5317 size = mem_element->second.allocInfo.allocationSize; 5318 } 5319 size_t convSize = (size_t)(size); 5320 mem_element->second.pData = malloc(2 * convSize); 5321 memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize); 5322 *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2); 5323 } 5324 } 5325} 5326#endif 5327// Note: This function assumes that the global lock is held by the calling 5328// thread. 5329static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) { 5330 bool skip_call = false; 5331 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer); 5332 if (pCB) { 5333 for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) { 5334 for (auto event : queryEventsPair.second) { 5335 if (my_data->eventMap[event].needsSignaled) { 5336 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5337 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS", 5338 "Cannot get query results on queryPool %" PRIu64 5339 " with index %d which was guarded by unsignaled event %" PRIu64 ".", 5340 (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event)); 5341 } 5342 } 5343 } 5344 } 5345 return skip_call; 5346} 5347// Remove given cmd_buffer from the global inFlight set. 5348// Also, if given queue is valid, then remove the cmd_buffer from that queues 5349// inFlightCmdBuffer set. Finally, check all other queues and if given cmd_buffer 5350// is still in flight on another queue, add it back into the global set. 5351// Note: This function assumes that the global lock is held by the calling 5352// thread. 5353static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) { 5354 // Pull it off of global list initially, but if we find it in any other queue list, add it back in 5355 dev_data->globalInFlightCmdBuffers.erase(cmd_buffer); 5356 if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) { 5357 dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer); 5358 for (auto q : dev_data->queues) { 5359 if ((q != queue) && 5360 (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) { 5361 dev_data->globalInFlightCmdBuffers.insert(cmd_buffer); 5362 break; 5363 } 5364 } 5365 } 5366} 5367// Verify that state for fence being waited on is appropriate. 
That is, 5368// a fence being waited on should not already be signalled and 5369// it should have been submitted on a queue or during acquire next image 5370static inline bool verifyWaitFenceState(VkDevice device, VkFence fence, const char *apiCall) { 5371 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5372 bool skipCall = false; 5373 auto pFenceInfo = my_data->fenceMap.find(fence); 5374 if (pFenceInfo != my_data->fenceMap.end()) { 5375 if (!pFenceInfo->second.firstTimeFlag) { 5376 if (!pFenceInfo->second.needsSignaled) { 5377 skipCall |= 5378 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5379 (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM", 5380 "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence); 5381 } 5382 if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence 5383 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5384 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM", 5385 "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during " 5386 "acquire next image.", 5387 apiCall, reinterpret_cast<uint64_t &>(fence)); 5388 } 5389 } else { 5390 pFenceInfo->second.firstTimeFlag = false; 5391 } 5392 } 5393 return skipCall; 5394} 5395 5396VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 5397vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) { 5398 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5399 bool skip_call = false; 5400 // Verify fence status of submitted fences 5401 std::unique_lock<std::mutex> lock(global_lock); 5402 for (uint32_t i = 0; i < fenceCount; i++) { 5403 skip_call |= verifyWaitFenceState(device, pFences[i], "vkWaitForFences"); 5404 } 5405 lock.unlock(); 5406 if (skip_call) 5407 return VK_ERROR_VALIDATION_FAILED_EXT; 5408 5409 VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout); 5410 5411 if (result == VK_SUCCESS) { 5412 lock.lock(); 5413 // When we know that all fences are complete we can clean/remove their CBs 5414 if (waitAll || fenceCount == 1) { 5415 for (uint32_t i = 0; i < fenceCount; ++i) { 5416 auto &fence_node = dev_data->fenceMap[pFences[i]]; 5417 VkQueue fence_queue = fence_node.queue; 5418 for (auto cmdBuffer : fence_node.cmdBuffers) { 5419 skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer); 5420 removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue); 5421 } 5422 } 5423 decrementResources(dev_data, fenceCount, pFences); 5424 } 5425 // NOTE : Alternate case not handled here is when some fences have completed. In 5426 // this case for app to guarantee which fences completed it will have to call 5427 // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete. 
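// Illustrative sketch, not layer code: in the partial-wait case the NOTE above refers to
// (waitAll == VK_FALSE with multiple fences), an app would discover completion like this:
//     for (uint32_t i = 0; i < fenceCount; ++i)
//         if (vkGetFenceStatus(device, pFences[i]) == VK_SUCCESS) {
//             // fence i signaled; this layer retires its command buffers in vkGetFenceStatus
//         }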
5428 lock.unlock(); 5429 } 5430 if (skip_call) 5431 return VK_ERROR_VALIDATION_FAILED_EXT; 5432 return result; 5433} 5434 5435VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) { 5436 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5437 bool skipCall = false; 5438 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 5439 std::unique_lock<std::mutex> lock(global_lock); 5440 skipCall = verifyWaitFenceState(device, fence, "vkGetFenceStatus"); 5441 lock.unlock(); 5442 5443 if (skipCall) 5444 return result; 5445 5446 result = dev_data->device_dispatch_table->GetFenceStatus(device, fence); 5447 bool skip_call = false; 5448 lock.lock(); 5449 if (result == VK_SUCCESS) { 5450 auto &fence_node = dev_data->fenceMap[fence]; 5451 auto fence_queue = fence_node.queue; 5452 for (auto cmdBuffer : fence_node.cmdBuffers) { 5453 skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer); 5454 removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue); 5455 } 5456 decrementResources(dev_data, 1, &fence); 5457 } 5458 lock.unlock(); 5459 if (skip_call) 5460 return VK_ERROR_VALIDATION_FAILED_EXT; 5461 return result; 5462} 5463 5464VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, 5465 VkQueue *pQueue) { 5466 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5467 dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue); 5468 std::lock_guard<std::mutex> lock(global_lock); 5469 5470 // Add queue to tracking set only if it is new 5471 auto result = dev_data->queues.emplace(*pQueue); 5472 if (result.second == true) { 5473 QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue]; 5474 pQNode->device = device; 5475 } 5476} 5477 5478VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) { 5479 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 5480 decrementResources(dev_data, queue); 5481 bool skip_call = false; 5482 std::unique_lock<std::mutex> lock(global_lock); 5483 // Iterate over local set since we erase set members as we go in for loop 5484 auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers; 5485 for (auto cmdBuffer : local_cb_set) { 5486 skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer); 5487 removeInFlightCmdBuffer(dev_data, cmdBuffer, queue); 5488 } 5489 dev_data->queueMap[queue].inFlightCmdBuffers.clear(); 5490 lock.unlock(); 5491 if (skip_call) 5492 return VK_ERROR_VALIDATION_FAILED_EXT; 5493 VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue); 5494 return result; 5495} 5496 5497VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) { 5498 bool skip_call = false; 5499 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5500 std::unique_lock<std::mutex> lock(global_lock); 5501 for (auto queue : dev_data->queues) { 5502 decrementResources(dev_data, queue); 5503 if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) { 5504 // Clear all of the queue inFlightCmdBuffers (global set cleared below) 5505 dev_data->queueMap[queue].inFlightCmdBuffers.clear(); 5506 } 5507 } 5508 for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) { 5509 skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer); 5510 } 5511 dev_data->globalInFlightCmdBuffers.clear(); 5512 lock.unlock(); 5513 if (skip_call) 5514 return VK_ERROR_VALIDATION_FAILED_EXT; 5515 VkResult result = 
dev_data->device_dispatch_table->DeviceWaitIdle(device); 5516 return result; 5517} 5518 5519VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) { 5520 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5521 bool skipCall = false; 5522 std::unique_lock<std::mutex> lock(global_lock); 5523 auto fence_pair = dev_data->fenceMap.find(fence); 5524 if (fence_pair != dev_data->fenceMap.end()) { 5525 if (fence_pair->second.in_use.load()) { 5526 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5527 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", 5528 "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence)); 5529 } 5530 dev_data->fenceMap.erase(fence_pair); 5531 } 5532 lock.unlock(); 5533 5534 if (!skipCall) 5535 dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator); 5536} 5537 5538VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5539vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) { 5540 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5541 dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator); 5542 std::lock_guard<std::mutex> lock(global_lock); 5543 auto item = dev_data->semaphoreMap.find(semaphore); 5544 if (item != dev_data->semaphoreMap.end()) { 5545 if (item->second.in_use.load()) { 5546 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 5547 reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS", 5548 "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore)); 5549 } 5550 dev_data->semaphoreMap.erase(semaphore); 5551 } 5552 // TODO : Clean up any internal data structures using this obj. 5553} 5554 5555VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) { 5556 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5557 bool skip_call = false; 5558 std::unique_lock<std::mutex> lock(global_lock); 5559 auto event_data = dev_data->eventMap.find(event); 5560 if (event_data != dev_data->eventMap.end()) { 5561 if (event_data->second.in_use.load()) { 5562 skip_call |= log_msg( 5563 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 5564 reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS", 5565 "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event)); 5566 } 5567 dev_data->eventMap.erase(event_data); 5568 } 5569 lock.unlock(); 5570 if (!skip_call) 5571 dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator); 5572 // TODO : Clean up any internal data structures using this obj. 5573} 5574 5575VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5576vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) { 5577 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5578 ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator); 5579 // TODO : Clean up any internal data structures using this obj. 
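// Note: the queryPoolMap entry added in vkCreateQueryPool, and any queryToStateMap
// entries keyed to this pool, are currently left in place (see TODO above), so a
// destroyed pool's handle can still appear in the layer's query-state tracking.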
5580} 5581 5582VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, 5583 uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, 5584 VkQueryResultFlags flags) { 5585 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5586 unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight; 5587 GLOBAL_CB_NODE *pCB = nullptr; 5588 std::unique_lock<std::mutex> lock(global_lock); 5589 for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) { 5590 pCB = getCBNode(dev_data, cmdBuffer); 5591 for (auto queryStatePair : pCB->queryToStateMap) { 5592 queriesInFlight[queryStatePair.first].push_back(cmdBuffer); 5593 } 5594 } 5595 bool skip_call = false; 5596 for (uint32_t i = 0; i < queryCount; ++i) { 5597 QueryObject query = {queryPool, firstQuery + i}; 5598 auto queryElement = queriesInFlight.find(query); 5599 auto queryToStateElement = dev_data->queryToStateMap.find(query); 5600 if (queryToStateElement != dev_data->queryToStateMap.end()) { 5601 // Available and in flight 5602 if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() && 5603 queryToStateElement->second) { 5604 for (auto cmdBuffer : queryElement->second) { 5605 pCB = getCBNode(dev_data, cmdBuffer); 5606 auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query); 5607 if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) { 5608 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5609 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5610 "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.", 5611 (uint64_t)(queryPool), firstQuery + i); 5612 } else { 5613 for (auto event : queryEventElement->second) { 5614 dev_data->eventMap[event].needsSignaled = true; 5615 } 5616 } 5617 } 5618 // Unavailable and in flight 5619 } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() && 5620 !queryToStateElement->second) { 5621 // TODO : Can there be the same query in use by multiple command buffers in flight? 
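// The loop below ORs together the availability that each in-flight command buffer has
// recorded for this query; reading an unavailable, in-flight query is accepted only when
// the caller also passed VK_QUERY_RESULT_WAIT_BIT or VK_QUERY_RESULT_PARTIAL_BIT and at
// least one of those command buffers will make the result available.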
5622 bool make_available = false; 5623 for (auto cmdBuffer : queryElement->second) { 5624 pCB = getCBNode(dev_data, cmdBuffer); 5625 make_available |= pCB->queryToStateMap[query]; 5626 } 5627 if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) { 5628 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5629 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5630 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.", 5631 (uint64_t)(queryPool), firstQuery + i); 5632 } 5633 // Unavailable 5634 } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) { 5635 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5636 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5637 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.", 5638 (uint64_t)(queryPool), firstQuery + i); 5639 // Uninitialized 5640 } else if (queryToStateElement == dev_data->queryToStateMap.end()) { 5641 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5642 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5643 "Cannot get query results on queryPool %" PRIu64 5644 " with index %d as data has not been collected for this index.", 5645 (uint64_t)(queryPool), firstQuery + i); 5646 } 5647 } 5648 } 5649 lock.unlock(); 5650 if (skip_call) 5651 return VK_ERROR_VALIDATION_FAILED_EXT; 5652 return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, 5653 flags); 5654} 5655 5656 static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) { 5657 bool skip_call = false; 5658 auto buffer_data = my_data->bufferMap.find(buffer); 5659 if (buffer_data == my_data->bufferMap.end()) { 5660 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 5661 (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS", 5662 "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer)); 5663 } else { 5664 if (buffer_data->second.in_use.load()) { 5665 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 5666 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS", 5667 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer)); 5668 } 5669 } 5670 return skip_call; 5671} 5672 5673 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5674vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) { 5675 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5676 bool skipCall = false; 5677 std::unique_lock<std::mutex> lock(global_lock); 5678 if (!validateIdleBuffer(dev_data, buffer) && !skipCall) { 5679 lock.unlock(); 5680 dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator); 5681 lock.lock(); 5682 } 5683 dev_data->bufferMap.erase(buffer); 5684} 5685 5686 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5687vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) { 5688 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5689 dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator); 5690 
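// The driver call above has already destroyed the handle; the remainder is layer
// bookkeeping only, dropping the shadowed VkBufferViewCreateInfo under the global lock.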
std::lock_guard<std::mutex> lock(global_lock); 5691 auto item = dev_data->bufferViewMap.find(bufferView); 5692 if (item != dev_data->bufferViewMap.end()) { 5693 dev_data->bufferViewMap.erase(item); 5694 } 5695} 5696 5697 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) { 5698 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5699 bool skipCall = false; 5700 if (!skipCall) 5701 dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator); 5702 5703 std::lock_guard<std::mutex> lock(global_lock); 5704 const auto& entry = dev_data->imageMap.find(image); 5705 if (entry != dev_data->imageMap.end()) { 5706 // Clear any memory mapping for this image 5707 auto mem_entry = dev_data->memObjMap.find(entry->second.mem); 5708 if (mem_entry != dev_data->memObjMap.end()) 5709 mem_entry->second.image = VK_NULL_HANDLE; 5710 5711 // Remove image from imageMap 5712 dev_data->imageMap.erase(entry); 5713 } 5714 const auto& subEntry = dev_data->imageSubresourceMap.find(image); 5715 if (subEntry != dev_data->imageSubresourceMap.end()) { 5716 for (const auto& pair : subEntry->second) { 5717 dev_data->imageLayoutMap.erase(pair); 5718 } 5719 dev_data->imageSubresourceMap.erase(subEntry); 5720 } 5721} 5722#if MTMERGESOURCE 5723 static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle, 5724 VkDebugReportObjectTypeEXT object_type) { 5725 if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) { 5726 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0, 5727 MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle, 5728 other_handle); 5729 } else { 5730 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0, 5731 MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle, 5732 other_handle); 5733 } 5734} 5735 5736 static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range, 5737 VkDebugReportObjectTypeEXT object_type) { 5738 bool skip_call = false; 5739 5740 for (auto range : ranges) { 5741 if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) < 5742 (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1))) 5743 continue; 5744 if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) > 5745 (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1))) 5746 continue; 5747 skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type); 5748 } 5749 return skip_call; 5750} 5751 5752 static bool validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset, 5753 VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges, 5754 const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) { 5755 MEMORY_RANGE range; 5756 range.handle = handle; 5757 range.memory = mem; 5758 range.start = memoryOffset; 5759 range.end = memoryOffset + memRequirements.size - 1; 5760 ranges.push_back(range); 5761 return validate_memory_range(dev_data, other_ranges, range, object_type); 5762} 5763 5764 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 
5765vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) { 5766 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5767 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 5768 std::unique_lock<std::mutex> lock(global_lock); 5769 // Track objects tied to memory 5770 uint64_t buffer_handle = (uint64_t)(buffer); 5771 bool skipCall = 5772 set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory"); 5773 auto buffer_node = dev_data->bufferMap.find(buffer); 5774 if (buffer_node != dev_data->bufferMap.end()) { 5775 buffer_node->second.mem = mem; 5776 VkMemoryRequirements memRequirements; 5777 dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements); 5778 skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements, 5779 dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges, 5780 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT); 5781 // Validate memory requirements alignment 5782 if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) { 5783 skipCall |= 5784 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, 5785 __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS", 5786 "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be an integer multiple of the " 5787 "VkMemoryRequirements::alignment value %#" PRIxLEAST64 5788 ", returned from a call to vkGetBufferMemoryRequirements with buffer", 5789 memoryOffset, memRequirements.alignment); 5790 } 5791 // Validate device limits alignments 5792 VkBufferUsageFlags usage = dev_data->bufferMap[buffer].createInfo.usage; 5793 if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) { 5794 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) { 5795 skipCall |= 5796 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5797 0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS", 5798 "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of " 5799 "device limit minTexelBufferOffsetAlignment %#" PRIxLEAST64, 5800 memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment); 5801 } 5802 } 5803 if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) { 5804 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 5805 0) { 5806 skipCall |= 5807 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5808 0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS", 5809 "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of " 5810 "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64, 5811 memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment); 5812 } 5813 } 5814 if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) { 5815 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 5816 0) { 5817 skipCall |= 5818 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5819 0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS", 5820 "vkBindBufferMemory(): 
memoryOffset is %#" PRIxLEAST64 " but must be a multiple of " 5821 "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64, 5822 memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment); 5823 } 5824 } 5825 } 5826 print_mem_list(dev_data); 5827 lock.unlock(); 5828 if (!skipCall) { 5829 result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset); 5830 } 5831 return result; 5832} 5833 5834VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5835vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) { 5836 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5837 // TODO : What to track here? 5838 // Could potentially save returned mem requirements and validate values passed into BindBufferMemory 5839 my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements); 5840} 5841 5842VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5843vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) { 5844 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5845 // TODO : What to track here? 5846 // Could potentially save returned mem requirements and validate values passed into BindImageMemory 5847 my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements); 5848} 5849#endif 5850VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5851vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) { 5852 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5853 ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator); 5854 // TODO : Clean up any internal data structures using this obj. 5855} 5856 5857VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5858vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) { 5859 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5860 5861 std::unique_lock<std::mutex> lock(global_lock); 5862 my_data->shaderModuleMap.erase(shaderModule); 5863 lock.unlock(); 5864 5865 my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator); 5866} 5867 5868VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5869vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) { 5870 get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator); 5871 // TODO : Clean up any internal data structures using this obj. 5872} 5873 5874VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5875vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) { 5876 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5877 ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator); 5878 // TODO : Clean up any internal data structures using this obj. 5879} 5880 5881VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5882vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) { 5883 get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator); 5884 // TODO : Clean up any internal data structures using this obj. 
5885} 5886 5887 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5888vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) { 5889 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5890 ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator); 5891 // TODO : Clean up any internal data structures using this obj. 5892} 5893 5894 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5895vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) { 5896 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5897 ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator); 5898 // TODO : Clean up any internal data structures using this obj. 5899} 5900// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result 5901 // If this is a secondary command buffer, then make sure its primary is also in-flight 5902 // If primary is not in-flight, then remove secondary from global in-flight set 5903 // This function is only valid at a point when cmdBuffer is being reset or freed 5904 static bool checkAndClearCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) { 5905 bool skip_call = false; 5906 if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) { 5907 // Primary CB or secondary where primary is also in-flight is an error 5908 if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) || 5909 (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) { 5910 skip_call |= log_msg( 5911 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5912 reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 5913 "Attempt to %s command buffer (%#" PRIxLEAST64 ") which is in use.", action, 5914 reinterpret_cast<const uint64_t &>(cb_node->commandBuffer)); 5915 } else { // Secondary CB w/o primary in-flight, remove from in-flight 5916 dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer); 5917 } 5918 } 5919 return skip_call; 5920} 5921 // Iterate over all cmdBuffers in given commandPool and verify that each is not in use 5922 static bool checkAndClearCommandBuffersInFlight(layer_data *dev_data, const VkCommandPool commandPool, const char *action) { 5923 bool skip_call = false; 5924 auto pool_data = dev_data->commandPoolMap.find(commandPool); 5925 if (pool_data != dev_data->commandPoolMap.end()) { 5926 for (auto cmd_buffer : pool_data->second.commandBuffers) { 5927 if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) { 5928 skip_call |= checkAndClearCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action); 5929 } 5930 } 5931 } 5932 return skip_call; 5933} 5934 5935 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5936vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) { 5937 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5938 5939 bool skip_call = false; 5940 std::unique_lock<std::mutex> lock(global_lock); 5941 for (uint32_t i = 0; i < commandBufferCount; i++) { 5942 auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]); 5943 if (cb_pair != dev_data->commandBufferMap.end()) skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free"); // guard: don't dereference end() for a handle unknown to the layer 5944 // Delete CB information structure, and remove from 
commandBufferMap 5945 if (cb_pair != dev_data->commandBufferMap.end()) { 5946 // reset prior to delete for data clean-up 5947 resetCB(dev_data, (*cb_pair).second->commandBuffer); 5948 delete (*cb_pair).second; 5949 dev_data->commandBufferMap.erase(cb_pair); 5950 } 5951 5952 // Remove commandBuffer reference from commandPoolMap 5953 dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]); 5954 } 5955#if MTMERGESOURCE 5956 printCBList(dev_data); 5957#endif 5958 lock.unlock(); 5959 5960 if (!skip_call) 5961 dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers); 5962} 5963 5964VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, 5965 const VkAllocationCallbacks *pAllocator, 5966 VkCommandPool *pCommandPool) { 5967 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5968 5969 VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool); 5970 5971 if (VK_SUCCESS == result) { 5972 std::lock_guard<std::mutex> lock(global_lock); 5973 dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags; 5974 dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex; 5975 } 5976 return result; 5977} 5978 5979VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, 5980 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) { 5981 5982 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5983 VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool); 5984 if (result == VK_SUCCESS) { 5985 std::lock_guard<std::mutex> lock(global_lock); 5986 dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo; 5987 } 5988 return result; 5989} 5990 5991// Destroy commandPool along with all of the commandBuffers allocated from that pool 5992VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5993vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { 5994 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5995 bool skipCall = false; 5996 std::unique_lock<std::mutex> lock(global_lock); 5997 // Verify that command buffers in pool are complete (not in-flight) 5998 VkBool32 result = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with"); 5999 // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandPoolMap 6000 if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) { 6001 for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin(); 6002 poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) { 6003 clear_cmd_buf_and_mem_references(dev_data, *poolCb); 6004 auto del_cb = dev_data->commandBufferMap.find(*poolCb); 6005 delete (*del_cb).second; // delete CB info structure 6006 dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer 6007 poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase( 6008 poolCb); // Remove CB reference from commandPoolMap's list 6009 } 6010 } 6011 dev_data->commandPoolMap.erase(commandPool); 6012 6013 lock.unlock(); 6014 6015 if (result) 6016 return; 6017 6018 if (!skipCall) 6019 
dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator); 6020} 6021 6022VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6023vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) { 6024 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6025 bool skipCall = false; 6026 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 6027 6028 if (checkAndClearCommandBuffersInFlight(dev_data, commandPool, "reset command pool with")) 6029 return VK_ERROR_VALIDATION_FAILED_EXT; 6030 6031 if (!skipCall) 6032 result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags); 6033 6034 // Reset all of the CBs allocated from this pool 6035 if (VK_SUCCESS == result) { 6036 std::lock_guard<std::mutex> lock(global_lock); 6037 auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin(); 6038 while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) { 6039 resetCB(dev_data, (*it)); 6040 ++it; 6041 } 6042 } 6043 return result; 6044} 6045 6046VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) { 6047 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6048 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 6049 bool skipCall = false; 6050 std::unique_lock<std::mutex> lock(global_lock); 6051 for (uint32_t i = 0; i < fenceCount; ++i) { 6052 auto fence_item = dev_data->fenceMap.find(pFences[i]); 6053 if (fence_item != dev_data->fenceMap.end()) { 6054 fence_item->second.needsSignaled = true; 6055 if (fence_item->second.in_use.load()) { 6056 skipCall |= 6057 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 6058 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", 6059 "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i])); 6060 } 6061 } 6062 } 6063 lock.unlock(); 6064 if (!skipCall) 6065 result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences); 6066 return result; 6067} 6068 6069VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6070vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) { 6071 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6072 std::unique_lock<std::mutex> lock(global_lock); 6073 auto fbNode = dev_data->frameBufferMap.find(framebuffer); 6074 if (fbNode != dev_data->frameBufferMap.end()) { 6075 for (auto cb : fbNode->second.referencingCmdBuffers) { 6076 auto cbNode = dev_data->commandBufferMap.find(cb); 6077 if (cbNode != dev_data->commandBufferMap.end()) { 6078 // Set CB as invalid and record destroyed framebuffer 6079 cbNode->second->state = CB_INVALID; 6080 cbNode->second->destroyedFramebuffers.insert(framebuffer); 6081 } 6082 } 6083 delete [] fbNode->second.createInfo.pAttachments; 6084 dev_data->frameBufferMap.erase(fbNode); 6085 } 6086 lock.unlock(); 6087 dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator); 6088} 6089 6090VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6091vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { 6092 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6093 dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator); 6094 std::lock_guard<std::mutex> lock(global_lock); 
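// Only the map entry is erased below; the RENDER_PASS_NODE pointer it holds is not
// deleted here, and command buffers that reference this render pass are not marked
// invalid the way vkDestroyFramebuffer marks framebuffer users above.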
6095 dev_data->renderPassMap.erase(renderPass); 6096} 6097 6098VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, 6099 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) { 6100 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6101 6102 VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer); 6103 6104 if (VK_SUCCESS == result) { 6105 std::lock_guard<std::mutex> lock(global_lock); 6106 // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid 6107 dev_data->bufferMap[*pBuffer].createInfo = *pCreateInfo; 6108 dev_data->bufferMap[*pBuffer].in_use.store(0); 6109 } 6110 return result; 6111} 6112 6113VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo, 6114 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) { 6115 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6116 VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView); 6117 if (VK_SUCCESS == result) { 6118 std::lock_guard<std::mutex> lock(global_lock); 6119 dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo); 6120#if MTMERGESOURCE 6121 // In order to create a valid buffer view, the buffer must have been created with at least one of the 6122 // following flags: UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT 6123 validate_buffer_usage_flags(dev_data, pCreateInfo->buffer, 6124 VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false, 6125 "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT"); 6126#endif 6127 } 6128 return result; 6129} 6130 6131VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, 6132 const VkAllocationCallbacks *pAllocator, VkImage *pImage) { 6133 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6134 6135 VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage); 6136 6137 if (VK_SUCCESS == result) { 6138 std::lock_guard<std::mutex> lock(global_lock); 6139 IMAGE_LAYOUT_NODE image_node; 6140 image_node.layout = pCreateInfo->initialLayout; 6141 image_node.format = pCreateInfo->format; 6142 dev_data->imageMap[*pImage].createInfo = *pCreateInfo; 6143 ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()}; 6144 dev_data->imageSubresourceMap[*pImage].push_back(subpair); 6145 dev_data->imageLayoutMap[subpair] = image_node; 6146 } 6147 return result; 6148} 6149 6150static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) { 6151 /* expects global_lock to be held by caller */ 6152 6153 auto image_node_it = dev_data->imageMap.find(image); 6154 if (image_node_it != dev_data->imageMap.end()) { 6155 /* If the caller used the special values VK_REMAINING_MIP_LEVELS and 6156 * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to 6157 * the actual values. 
6158 */ 6159 if (range->levelCount == VK_REMAINING_MIP_LEVELS) { 6160 range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel; 6161 } 6162 6163 if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) { 6164 range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer; 6165 } 6166 } 6167} 6168 6169// Return the correct layer/level counts if the caller used the special 6170// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS. 6171static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range, 6172 VkImage image) { 6173 /* expects global_lock to be held by caller */ 6174 6175 *levels = range.levelCount; 6176 *layers = range.layerCount; 6177 auto image_node_it = dev_data->imageMap.find(image); 6178 if (image_node_it != dev_data->imageMap.end()) { 6179 if (range.levelCount == VK_REMAINING_MIP_LEVELS) { 6180 *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel; 6181 } 6182 if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) { 6183 *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer; 6184 } 6185 } 6186} 6187 6188VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo, 6189 const VkAllocationCallbacks *pAllocator, VkImageView *pView) { 6190 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6191 VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView); 6192 if (VK_SUCCESS == result) { 6193 std::lock_guard<std::mutex> lock(global_lock); 6194 VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo); 6195 ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image); 6196 dev_data->imageViewMap[*pView] = localCI; 6197#if MTMERGESOURCE 6198 // Validate that img has correct usage flags set 6199 validate_image_usage_flags(dev_data, pCreateInfo->image, 6200 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | 6201 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 6202 false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT"); 6203#endif 6204 } 6205 return result; 6206} 6207 6208VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6209vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) { 6210 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6211 VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence); 6212 if (VK_SUCCESS == result) { 6213 std::lock_guard<std::mutex> lock(global_lock); 6214 auto &fence_node = dev_data->fenceMap[*pFence]; 6215 fence_node.createInfo = *pCreateInfo; 6216 fence_node.needsSignaled = true; 6217 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) { 6218 fence_node.firstTimeFlag = true; 6219 fence_node.needsSignaled = false; 6220 } 6221 fence_node.in_use.store(0); 6222 } 6223 return result; 6224} 6225 6226// TODO handle pipeline caches 6227VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, 6228 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) { 6229 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6230 VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, 
                                                                           pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// Utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_NODE *pPipe) {
    // If any attachment used by this pipeline has blendEnable and a blend factor that reads the blend constants,
    // set the pipeline-level blendConstantsEnabled flag
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
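// Worked example (hypothetical state): an attachment with blendEnable = VK_TRUE
// and dstColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR falls inside the
// [CONSTANT_COLOR, ONE_MINUS_CONSTANT_ALPHA] range tested above, so
// blendConstantsEnabled is set and later draws with this pipeline are expected
// to have blend constants supplied (statically, or via vkCmdSetBlendConstants()).

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. 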
If everything looks good, we'll then create the pipeline and add NODE to pipelineMap 6285 bool skipCall = false; 6286 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic 6287 vector<PIPELINE_NODE *> pPipeNode(count); 6288 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6289 6290 uint32_t i = 0; 6291 std::unique_lock<std::mutex> lock(global_lock); 6292 6293 for (i = 0; i < count; i++) { 6294 pPipeNode[i] = new PIPELINE_NODE; 6295 pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]); 6296 skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i); 6297 } 6298 6299 if (!skipCall) { 6300 lock.unlock(); 6301 result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, 6302 pPipelines); 6303 lock.lock(); 6304 for (i = 0; i < count; i++) { 6305 pPipeNode[i]->pipeline = pPipelines[i]; 6306 dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i]; 6307 } 6308 lock.unlock(); 6309 } else { 6310 for (i = 0; i < count; i++) { 6311 delete pPipeNode[i]; 6312 } 6313 lock.unlock(); 6314 return VK_ERROR_VALIDATION_FAILED_EXT; 6315 } 6316 return result; 6317} 6318 6319VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6320vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, 6321 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, 6322 VkPipeline *pPipelines) { 6323 VkResult result = VK_SUCCESS; 6324 bool skipCall = false; 6325 6326 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic 6327 vector<PIPELINE_NODE *> pPipeNode(count); 6328 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6329 6330 uint32_t i = 0; 6331 std::unique_lock<std::mutex> lock(global_lock); 6332 for (i = 0; i < count; i++) { 6333 // TODO: Verify compute stage bits 6334 6335 // Create and initialize internal tracking data structure 6336 pPipeNode[i] = new PIPELINE_NODE; 6337 pPipeNode[i]->initComputePipeline(&pCreateInfos[i]); 6338 // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo)); 6339 6340 // TODO: Add Compute Pipeline Verification 6341 // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]); 6342 } 6343 6344 if (!skipCall) { 6345 lock.unlock(); 6346 result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, 6347 pPipelines); 6348 lock.lock(); 6349 for (i = 0; i < count; i++) { 6350 pPipeNode[i]->pipeline = pPipelines[i]; 6351 dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i]; 6352 } 6353 lock.unlock(); 6354 } else { 6355 for (i = 0; i < count; i++) { 6356 // Clean up any locally allocated data structures 6357 delete pPipeNode[i]; 6358 } 6359 lock.unlock(); 6360 return VK_ERROR_VALIDATION_FAILED_EXT; 6361 } 6362 return result; 6363} 6364 6365VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, 6366 const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) { 6367 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6368 VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler); 6369 if (VK_SUCCESS == result) { 6370 std::lock_guard<std::mutex> lock(global_lock); 6371 dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo)); 6372 } 6373 
return result; 6374} 6375 6376VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6377vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, 6378 const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) { 6379 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6380 VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout); 6381 if (VK_SUCCESS == result) { 6382 // TODOSC : Capture layout bindings set 6383 LAYOUT_NODE *pNewNode = new LAYOUT_NODE; 6384 if (NULL == pNewNode) { 6385 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 6386 (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", 6387 "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()")) 6388 return VK_ERROR_VALIDATION_FAILED_EXT; 6389 } 6390 memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo)); 6391 pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount]; 6392 memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings, 6393 sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount); 6394 // g++ does not like reserve with size 0 6395 if (pCreateInfo->bindingCount) 6396 pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount); 6397 uint32_t totalCount = 0; 6398 for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) { 6399 if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) { 6400 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6401 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__, 6402 DRAWSTATE_INVALID_LAYOUT, "DS", "duplicated binding number in " 6403 "VkDescriptorSetLayoutBinding")) 6404 return VK_ERROR_VALIDATION_FAILED_EXT; 6405 } else { 6406 pNewNode->bindingToIndexMap[pCreateInfo->pBindings[i].binding] = i; 6407 } 6408 totalCount += pCreateInfo->pBindings[i].descriptorCount; 6409 if (pCreateInfo->pBindings[i].pImmutableSamplers) { 6410 VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers; 6411 *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount]; 6412 memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers, 6413 pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler)); 6414 pNewNode->immutableSamplerCount += pCreateInfo->pBindings[i].descriptorCount; 6415 } 6416 } 6417 pNewNode->layout = *pSetLayout; 6418 pNewNode->startIndex = 0; 6419 if (totalCount > 0) { 6420 pNewNode->descriptorTypes.resize(totalCount); 6421 pNewNode->stageFlags.resize(totalCount); 6422 uint32_t offset = 0; 6423 uint32_t j = 0; 6424 VkDescriptorType dType; 6425 for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) { 6426 dType = pCreateInfo->pBindings[i].descriptorType; 6427 for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) { 6428 pNewNode->descriptorTypes[offset + j] = dType; 6429 pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags; 6430 if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || 6431 (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { 6432 pNewNode->dynamicDescriptorCount++; 6433 } 6434 } 6435 offset += j; 6436 } 6437 pNewNode->endIndex = pNewNode->startIndex + totalCount - 1; 6438 } else { // no descriptors 6439 pNewNode->endIndex = 0; 6440 } 6441 // Put new node at Head of global Layer 
list
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
    }
    return result;
}

static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                     const char *caller_name) {
    bool skipCall = false;
    if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                           "%s call has push constants with offset %u and size %u that "
                           "exceeds this device's maxPushConstantsSize of %u.",
                           caller_name, offset, size, dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize);
    }
    return skipCall;
}
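// Worked example (hypothetical limits): on a device with maxPushConstantsSize = 128,
// a VkPushConstantRange with offset = 120 and size = 16 is rejected above because
// 120 + 16 = 136 > 128, and a range with size = 6 is rejected below because the
// size is not a multiple of 4.

VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    uint32_t i = 0;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
                        "vkCreatePipelineLayout() call has push constant index %u with "
                        "size %u. 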
Size must be greater than zero and a multiple of 4.", 6473 i, pCreateInfo->pPushConstantRanges[i].size); 6474 } 6475 // TODO : Add warning if ranges overlap 6476 } 6477 VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout); 6478 if (VK_SUCCESS == result) { 6479 std::lock_guard<std::mutex> lock(global_lock); 6480 // TODOSC : Merge capture of the setLayouts per pipeline 6481 PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout]; 6482 plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount); 6483 for (i = 0; i < pCreateInfo->setLayoutCount; ++i) { 6484 plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i]; 6485 } 6486 plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount); 6487 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 6488 plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i]; 6489 } 6490 } 6491 return result; 6492} 6493 6494VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6495vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, 6496 VkDescriptorPool *pDescriptorPool) { 6497 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6498 VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool); 6499 if (VK_SUCCESS == result) { 6500 // Insert this pool into Global Pool LL at head 6501 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 6502 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool %#" PRIxLEAST64, 6503 (uint64_t)*pDescriptorPool)) 6504 return VK_ERROR_VALIDATION_FAILED_EXT; 6505 DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo); 6506 if (NULL == pNewNode) { 6507 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 6508 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", 6509 "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()")) 6510 return VK_ERROR_VALIDATION_FAILED_EXT; 6511 } else { 6512 std::lock_guard<std::mutex> lock(global_lock); 6513 dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode; 6514 } 6515 } else { 6516 // Need to do anything if pool create fails? 
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
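// Note on the bookkeeping above: clearDescriptorPool() is expected to return the
// pool's tracking state to that of a freshly created pool. For example
// (hypothetical numbers), a pool created with maxSets = 4 that has 3 sets
// allocated from it gets all 4 sets counted as available again after
// vkResetDescriptorPool(), and the previously allocated sets become invalid.

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    // Verify that requested descriptorSets are available in pool
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
    if (!pPoolNode) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                            (uint64_t)pAllocateInfo->descriptorPool);
    } else { // Make sure pool has all the available descriptors before calling down chain
        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
                                                             pAllocateInfo->pSetLayouts);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
        if (pPoolNode) {
            if (pAllocateInfo->descriptorSetCount == 0) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
                        "AllocateDescriptorSets called with 0 count");
            }
            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
                        (uint64_t)pDescriptorSets[i]);
                // Create new set node and add to head of pool nodes
                SET_NODE *pNewNode = new SET_NODE;
                if (NULL == pNewNode) {
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
                        lock.unlock();
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                } else {
                    // TODO : Pool should store a total count of each type of Descriptor available
                    // When descriptors are allocated, decrement the count and validate here
                    // that the count doesn't go below 0. On reset/free, the count needs to be bumped back up.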
6580 // Insert set at head of Set LL for this pool 6581 pNewNode->pNext = pPoolNode->pSets; 6582 pNewNode->in_use.store(0); 6583 pPoolNode->pSets = pNewNode; 6584 LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]); 6585 if (NULL == pLayout) { 6586 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6587 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i], 6588 __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 6589 "Unable to find set layout node for layout %#" PRIxLEAST64 6590 " specified in vkAllocateDescriptorSets() call", 6591 (uint64_t)pAllocateInfo->pSetLayouts[i])) { 6592 lock.unlock(); 6593 return VK_ERROR_VALIDATION_FAILED_EXT; 6594 } 6595 } 6596 pNewNode->pLayout = pLayout; 6597 pNewNode->pool = pAllocateInfo->descriptorPool; 6598 pNewNode->set = pDescriptorSets[i]; 6599 pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0; 6600 if (pNewNode->descriptorCount) { 6601 pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount); 6602 } 6603 dev_data->setMap[pDescriptorSets[i]] = pNewNode; 6604 } 6605 } 6606 } 6607 lock.unlock(); 6608 } 6609 return result; 6610} 6611 6612VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6613vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) { 6614 bool skipCall = false; 6615 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6616 // Make sure that no sets being destroyed are in-flight 6617 std::unique_lock<std::mutex> lock(global_lock); 6618 for (uint32_t i = 0; i < count; ++i) 6619 skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets"); 6620 DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool); 6621 if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) { 6622 // Can't Free from a NON_FREE pool 6623 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 6624 (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS", 6625 "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " 6626 "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT."); 6627 } 6628 lock.unlock(); 6629 if (skipCall) 6630 return VK_ERROR_VALIDATION_FAILED_EXT; 6631 VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets); 6632 if (VK_SUCCESS == result) { 6633 lock.lock(); 6634 6635 // Update available descriptor sets in pool 6636 pPoolNode->availableSets += count; 6637 6638 // For each freed descriptor add it back into the pool as available 6639 for (uint32_t i = 0; i < count; ++i) { 6640 SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking 6641 invalidateBoundCmdBuffers(dev_data, pSet); 6642 LAYOUT_NODE *pLayout = pSet->pLayout; 6643 uint32_t typeIndex = 0, poolSizeCount = 0; 6644 for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) { 6645 typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType); 6646 poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount; 6647 pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount; 6648 } 6649 } 6650 lock.unlock(); 6651 } 6652 // TODO : Any other clean-up or book-keeping to do here? 
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // dsUpdate will return true only if a bailout error occurs, so we only want to call down the chain when update returns false
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
    lock.unlock();
    if (!rtn) {
        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                              pDescriptorCopies);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
        if (cp_it != dev_data->commandPoolMap.end()) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
#if MTMERGESOURCE
        printCBList(dev_data);
#endif
        lock.unlock();
    }
    return result;
}
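// Illustrative sketch (hypothetical handles): allocating two command buffers
// registers each one both in commandBufferMap and with its pool's buffer list,
// which is what lets later pool-level resets and destroys find them:
//
//     VkCommandBufferAllocateInfo ai = {};
//     ai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
//     ai.commandPool = pool;                      // looked up in commandPoolMap
//     ai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
//     ai.commandBufferCount = 2;
//     VkCommandBuffer cbs[2];
//     vkAllocateCommandBuffers(device, &ai, cbs); // each cb gets a GLOBAL_CB_NODE

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                        "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "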
" 6711 "You must check CB fence before this call.", 6712 commandBuffer); 6713 } 6714 clear_cmd_buf_and_mem_references(dev_data, pCB); 6715 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { 6716 // Secondary Command Buffer 6717 const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; 6718 if (!pInfo) { 6719 skipCall |= 6720 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6721 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6722 "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.", 6723 reinterpret_cast<void *>(commandBuffer)); 6724 } else { 6725 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { 6726 if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB 6727 skipCall |= log_msg( 6728 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6729 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6730 "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.", 6731 reinterpret_cast<void *>(commandBuffer)); 6732 } 6733 if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf 6734 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 6735 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6736 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, 6737 "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a " 6738 "valid framebuffer parameter is specified.", 6739 reinterpret_cast<void *>(commandBuffer)); 6740 } else { 6741 string errorString = ""; 6742 auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer); 6743 if (fbNode != dev_data->frameBufferMap.end()) { 6744 VkRenderPass fbRP = fbNode->second.createInfo.renderPass; 6745 if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) { 6746 // renderPass that framebuffer was created with must be compatible with local renderPass 6747 skipCall |= 6748 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6749 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6750 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, 6751 "DS", "vkBeginCommandBuffer(): Secondary Command " 6752 "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer " 6753 "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s", 6754 reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass), 6755 (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str()); 6756 } 6757 // Connect this framebuffer to this cmdBuffer 6758 fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer); 6759 } 6760 } 6761 } 6762 if ((pInfo->occlusionQueryEnable == VK_FALSE || 6763 dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) && 6764 (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) { 6765 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6766 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer), 6767 __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6768 "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have " 6769 "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not " 6770 
"support precise occlusion queries.", 6771 reinterpret_cast<void *>(commandBuffer)); 6772 } 6773 } 6774 if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) { 6775 auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass); 6776 if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) { 6777 if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) { 6778 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6779 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__, 6780 DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6781 "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must has a subpass index (%d) " 6782 "that is less than the number of subpasses (%d).", 6783 (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount); 6784 } 6785 } 6786 } 6787 } 6788 if (CB_RECORDING == pCB->state) { 6789 skipCall |= 6790 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6791 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6792 "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64 6793 ") in the RECORDING state. Must first call vkEndCommandBuffer().", 6794 (uint64_t)commandBuffer); 6795 } else if (CB_RECORDED == pCB->state) { 6796 VkCommandPool cmdPool = pCB->createInfo.commandPool; 6797 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) { 6798 skipCall |= 6799 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6800 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 6801 "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64 6802 ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64 6803 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", 6804 (uint64_t)commandBuffer, (uint64_t)cmdPool); 6805 } 6806 resetCB(dev_data, commandBuffer); 6807 } 6808 // Set updated state here in case implicit reset occurs above 6809 pCB->state = CB_RECORDING; 6810 pCB->beginInfo = *pBeginInfo; 6811 if (pCB->beginInfo.pInheritanceInfo) { 6812 pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo); 6813 pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo; 6814 } 6815 } else { 6816 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6817 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 6818 "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer); 6819 } 6820 lock.unlock(); 6821 if (skipCall) { 6822 return VK_ERROR_VALIDATION_FAILED_EXT; 6823 } 6824 VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo); 6825 6826 return result; 6827} 6828 6829VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) { 6830 bool skipCall = false; 6831 VkResult result = VK_SUCCESS; 6832 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6833 std::unique_lock<std::mutex> lock(global_lock); 6834 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6835 if (pCB) { 6836 if (pCB->state != CB_RECORDING) { 6837 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()"); 6838 } 6839 for (auto query : pCB->activeQueries) { 6840 
skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6841 DRAWSTATE_INVALID_QUERY, "DS", 6842 "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d", 6843 (uint64_t)(query.pool), query.index); 6844 } 6845 } 6846 if (!skipCall) { 6847 lock.unlock(); 6848 result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer); 6849 lock.lock(); 6850 if (VK_SUCCESS == result) { 6851 pCB->state = CB_RECORDED; 6852 // Reset CB status flags 6853 pCB->status = 0; 6854 printCB(dev_data, commandBuffer); 6855 } 6856 } else { 6857 result = VK_ERROR_VALIDATION_FAILED_EXT; 6858 } 6859 lock.unlock(); 6860 return result; 6861} 6862 6863VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6864vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) { 6865 bool skip_call = false; 6866 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6867 std::unique_lock<std::mutex> lock(global_lock); 6868 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6869 VkCommandPool cmdPool = pCB->createInfo.commandPool; 6870 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) { 6871 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6872 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 6873 "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64 6874 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", 6875 (uint64_t)commandBuffer, (uint64_t)cmdPool); 6876 } 6877 skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset"); 6878 lock.unlock(); 6879 if (skip_call) 6880 return VK_ERROR_VALIDATION_FAILED_EXT; 6881 VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags); 6882 if (VK_SUCCESS == result) { 6883 lock.lock(); 6884 resetCB(dev_data, commandBuffer); 6885 lock.unlock(); 6886 } 6887 return result; 6888} 6889 6890#if MTMERGESOURCE 6891// TODO : For any vkCmdBind* calls that include an object which has mem bound to it, 6892// need to account for that mem now having binding to given commandBuffer 6893#endif 6894VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6895vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) { 6896 bool skipCall = false; 6897 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6898 std::unique_lock<std::mutex> lock(global_lock); 6899 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6900 if (pCB) { 6901 skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()"); 6902 if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) { 6903 skipCall |= 6904 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 6905 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS", 6906 "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")", 6907 (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass); 6908 } 6909 6910 PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline); 6911 if (pPN) { 6912 pCB->lastBound[pipelineBindPoint].pipeline = pipeline; 6913 set_cb_pso_status(pCB, pPN); 6914 set_pipeline_state(pPN); 6915 skipCall |= 
validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline); 6916 } else { 6917 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 6918 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS", 6919 "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline)); 6920 } 6921 } 6922 lock.unlock(); 6923 if (!skipCall) 6924 dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline); 6925} 6926 6927VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6928vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) { 6929 bool skipCall = false; 6930 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6931 std::unique_lock<std::mutex> lock(global_lock); 6932 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6933 if (pCB) { 6934 skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()"); 6935 pCB->status |= CBSTATUS_VIEWPORT_SET; 6936 pCB->viewports.resize(viewportCount); 6937 memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport)); 6938 } 6939 lock.unlock(); 6940 if (!skipCall) 6941 dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports); 6942} 6943 6944VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6945vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) { 6946 bool skipCall = false; 6947 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6948 std::unique_lock<std::mutex> lock(global_lock); 6949 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6950 if (pCB) { 6951 skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()"); 6952 pCB->status |= CBSTATUS_SCISSOR_SET; 6953 pCB->scissors.resize(scissorCount); 6954 memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D)); 6955 } 6956 lock.unlock(); 6957 if (!skipCall) 6958 dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors); 6959} 6960 6961VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) { 6962 bool skipCall = false; 6963 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6964 std::unique_lock<std::mutex> lock(global_lock); 6965 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6966 if (pCB) { 6967 skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()"); 6968 pCB->status |= CBSTATUS_LINE_WIDTH_SET; 6969 } 6970 lock.unlock(); 6971 if (!skipCall) 6972 dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth); 6973} 6974 6975VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6976vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) { 6977 bool skipCall = false; 6978 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6979 std::unique_lock<std::mutex> lock(global_lock); 6980 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6981 if (pCB) { 6982 skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()"); 6983 pCB->status |= CBSTATUS_DEPTH_BIAS_SET; 6984 } 6985 lock.unlock(); 6986 if (!skipCall) 6987 dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, 
depthBiasConstantFactor, depthBiasClamp, 6988 depthBiasSlopeFactor); 6989} 6990 6991VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) { 6992 bool skipCall = false; 6993 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6994 std::unique_lock<std::mutex> lock(global_lock); 6995 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6996 if (pCB) { 6997 skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()"); 6998 pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET; 6999 } 7000 lock.unlock(); 7001 if (!skipCall) 7002 dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants); 7003} 7004 7005VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7006vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) { 7007 bool skipCall = false; 7008 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7009 std::unique_lock<std::mutex> lock(global_lock); 7010 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7011 if (pCB) { 7012 skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()"); 7013 pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET; 7014 } 7015 lock.unlock(); 7016 if (!skipCall) 7017 dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds); 7018} 7019 7020VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7021vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) { 7022 bool skipCall = false; 7023 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7024 std::unique_lock<std::mutex> lock(global_lock); 7025 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7026 if (pCB) { 7027 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()"); 7028 pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET; 7029 } 7030 lock.unlock(); 7031 if (!skipCall) 7032 dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask); 7033} 7034 7035VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7036vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) { 7037 bool skipCall = false; 7038 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7039 std::unique_lock<std::mutex> lock(global_lock); 7040 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7041 if (pCB) { 7042 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()"); 7043 pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET; 7044 } 7045 lock.unlock(); 7046 if (!skipCall) 7047 dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask); 7048} 7049 7050VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7051vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) { 7052 bool skipCall = false; 7053 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7054 std::unique_lock<std::mutex> lock(global_lock); 7055 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7056 if (pCB) { 7057 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()"); 7058 pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET; 7059 } 7060 lock.unlock(); 7061 if (!skipCall) 7062 
dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference); 7063} 7064 7065VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7066vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, 7067 uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, 7068 const uint32_t *pDynamicOffsets) { 7069 bool skipCall = false; 7070 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7071 std::unique_lock<std::mutex> lock(global_lock); 7072 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7073 if (pCB) { 7074 if (pCB->state == CB_RECORDING) { 7075 // Track total count of dynamic descriptor types to make sure we have an offset for each one 7076 uint32_t totalDynamicDescriptors = 0; 7077 string errorString = ""; 7078 uint32_t lastSetIndex = firstSet + setCount - 1; 7079 if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) 7080 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1); 7081 VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex]; 7082 for (uint32_t i = 0; i < setCount; i++) { 7083 SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]); 7084 if (pSet) { 7085 pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]); 7086 pSet->boundCmdBuffers.insert(commandBuffer); 7087 pCB->lastBound[pipelineBindPoint].pipelineLayout = layout; 7088 pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i]; 7089 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 7090 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, 7091 DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s", 7092 (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint)); 7093 if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) { 7094 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 7095 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], 7096 __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS", 7097 "DS %#" PRIxLEAST64 7098 " bound but it was never updated. 
You may want to either update it or not bind it.", 7099 (uint64_t)pDescriptorSets[i]); 7100 } 7101 // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout 7102 if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) { 7103 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7104 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], 7105 __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS", 7106 "descriptorSet #%u being bound is not compatible with overlapping layout in " 7107 "pipelineLayout due to: %s", 7108 i, errorString.c_str()); 7109 } 7110 if (pSet->pLayout->dynamicDescriptorCount) { 7111 // First make sure we won't overstep bounds of pDynamicOffsets array 7112 if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) { 7113 skipCall |= 7114 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7115 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, 7116 DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS", 7117 "descriptorSet #%u (%#" PRIxLEAST64 7118 ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets " 7119 "array. There must be one dynamic offset for each dynamic descriptor being bound.", 7120 i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount, 7121 (dynamicOffsetCount - totalDynamicDescriptors)); 7122 } else { // Validate and store dynamic offsets with the set 7123 // Validate Dynamic Offset Minimums 7124 uint32_t cur_dyn_offset = totalDynamicDescriptors; 7125 for (uint32_t d = 0; d < pSet->descriptorCount; d++) { 7126 if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) { 7127 if (vk_safe_modulo( 7128 pDynamicOffsets[cur_dyn_offset], 7129 dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) { 7130 skipCall |= log_msg( 7131 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7132 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 7133 DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS", 7134 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of " 7135 "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64, 7136 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset], 7137 dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment); 7138 } 7139 cur_dyn_offset++; 7140 } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { 7141 if (vk_safe_modulo( 7142 pDynamicOffsets[cur_dyn_offset], 7143 dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) { 7144 skipCall |= log_msg( 7145 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7146 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 7147 DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS", 7148 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of " 7149 "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64, 7150 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset], 7151 dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment); 7152 } 7153 cur_dyn_offset++; 7154 } 7155 } 7156 // Keep running total of dynamic descriptor count to verify at the end 7157 totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount; 7158 } 7159 } 7160 } else { 7161 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7162 
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
                                        (uint64_t)pDescriptorSets[i]);
                }
                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) { // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(
                                dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
                                errorString)) {
                            skipCall |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet %#" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
                                                         errorString)) {
                        skipCall |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64
                                                          " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
                                                          "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
                                    (uint64_t)oldFinalBoundSet, lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
            // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                                    "is %u. It should exactly match the number of dynamic descriptors.",
                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
            }
            // Save dynamicOffsets bound to this CB
            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.emplace_back(pDynamicOffsets[i]);
            }
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                offset, string_VkIndexType(indexType));
        }
        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
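// Worked example of the alignment check above (hypothetical values): binding
// with indexType = VK_INDEX_TYPE_UINT16 requires offset % 2 == 0, so an offset
// of 7 triggers DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR while an offset of 8 is
// fine; with VK_INDEX_TYPE_UINT32 the offset must be a multiple of 4.

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
                                                                  const VkDeviceSize *pOffsets) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);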
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    for (uint32_t i = 0; i < bindingCount; ++i) {
        VkDeviceMemory mem;
        skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
        if (cb_data != dev_data->commandBufferMap.end()) {
            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
            cb_data->second->validate_functions.push_back(function);
        }
    }
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
    } else {
        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

/* expects global_lock to be held by caller */
static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;

    for (auto imageView : pCB->updateImages) {
        auto iv_data = dev_data->imageViewMap.find(imageView);
        if (iv_data == dev_data->imageViewMap.end())
            continue;
        VkImage image = iv_data->second.image;
        VkDeviceMemory mem;
        skip_call |=
            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        VkDeviceMemory mem;
        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                                     uint32_t firstVertex, uint32_t firstInstance) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        pCB->drawCount[DRAW]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
updateResourceTrackingOnDraw(pCB); 7362 } 7363 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw"); 7364 } 7365 lock.unlock(); 7366 if (!skipCall) 7367 dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance); 7368} 7369 7370VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, 7371 uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, 7372 uint32_t firstInstance) { 7373 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7374 bool skipCall = false; 7375 std::unique_lock<std::mutex> lock(global_lock); 7376 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7377 if (pCB) { 7378 skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()"); 7379 pCB->drawCount[DRAW_INDEXED]++; 7380 skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS); 7381 skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB); 7382 // TODO : Need to pass commandBuffer as srcObj here 7383 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 7384 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS", 7385 "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++); 7386 skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer); 7387 if (!skipCall) { 7388 updateResourceTrackingOnDraw(pCB); 7389 } 7390 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed"); 7391 } 7392 lock.unlock(); 7393 if (!skipCall) 7394 dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, 7395 firstInstance); 7396} 7397 7398VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7399vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) { 7400 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7401 bool skipCall = false; 7402 std::unique_lock<std::mutex> lock(global_lock); 7403#if MTMERGESOURCE 7404 VkDeviceMemory mem; 7405 // MTMTODO : merge with code below 7406 skipCall = 7407 get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7408 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect"); 7409#endif 7410 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7411 if (pCB) { 7412 skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()"); 7413 pCB->drawCount[DRAW_INDIRECT]++; 7414 skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS); 7415 skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB); 7416 // TODO : Need to pass commandBuffer as srcObj here 7417 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 7418 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS", 7419 "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++); 7420 skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer); 7421 if (!skipCall) { 7422 updateResourceTrackingOnDraw(pCB); 7423 } 7424 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect"); 7425 } 7426 lock.unlock(); 7427 if (!skipCall) 7428 dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride); 7429} 7430 7431VK_LAYER_EXPORT VKAPI_ATTR void 
VKAPI_CALL 7432vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) { 7433 bool skipCall = false; 7434 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7435 std::unique_lock<std::mutex> lock(global_lock); 7436#if MTMERGESOURCE 7437 VkDeviceMemory mem; 7438 // MTMTODO : merge with code below 7439 skipCall = 7440 get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7441 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect"); 7442#endif 7443 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7444 if (pCB) { 7445 skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()"); 7446 pCB->drawCount[DRAW_INDEXED_INDIRECT]++; 7447 skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS); 7448 skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB); 7449 // TODO : Need to pass commandBuffer as srcObj here 7450 skipCall |= 7451 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 7452 __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:", 7453 g_drawCount[DRAW_INDEXED_INDIRECT]++); 7454 skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer); 7455 if (!skipCall) { 7456 updateResourceTrackingOnDraw(pCB); 7457 } 7458 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect"); 7459 } 7460 lock.unlock(); 7461 if (!skipCall) 7462 dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride); 7463} 7464 7465VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) { 7466 bool skipCall = false; 7467 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7468 std::unique_lock<std::mutex> lock(global_lock); 7469 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7470 if (pCB) { 7471 // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders 7472 // skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE); 7473 // TODO : Call below is temporary until call above can be re-enabled 7474 update_shader_storage_images_and_buffers(dev_data, pCB); 7475 skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB); 7476 skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()"); 7477 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch"); 7478 } 7479 lock.unlock(); 7480 if (!skipCall) 7481 dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z); 7482} 7483 7484VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7485vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) { 7486 bool skipCall = false; 7487 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7488 std::unique_lock<std::mutex> lock(global_lock); 7489#if MTMERGESOURCE 7490 VkDeviceMemory mem; 7491 skipCall = 7492 get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7493 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect"); 7494#endif 7495 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7496 if (pCB) { 7497 // TODO : Re-enable 
validate_and_update_draw_state() when it supports compute shaders 7498 // skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE); 7499 // TODO : Call below is temporary until call above can be re-enabled 7500 update_shader_storage_images_and_buffers(dev_data, pCB); 7501 skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB); 7502 skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()"); 7503 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect"); 7504 } 7505 lock.unlock(); 7506 if (!skipCall) 7507 dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset); 7508} 7509 7510VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, 7511 uint32_t regionCount, const VkBufferCopy *pRegions) { 7512 bool skipCall = false; 7513 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7514 std::unique_lock<std::mutex> lock(global_lock); 7515#if MTMERGESOURCE 7516 VkDeviceMemory mem; 7517 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7518 skipCall = 7519 get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7520 if (cb_data != dev_data->commandBufferMap.end()) { 7521 std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); }; 7522 cb_data->second->validate_functions.push_back(function); 7523 } 7524 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer"); 7525 skipCall |= 7526 get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7527 if (cb_data != dev_data->commandBufferMap.end()) { 7528 std::function<bool()> function = [=]() { 7529 set_memory_valid(dev_data, mem, true); 7530 return false; 7531 }; 7532 cb_data->second->validate_functions.push_back(function); 7533 } 7534 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer"); 7535 // Validate that SRC & DST buffers have correct usage flags set 7536 skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, 7537 "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT"); 7538 skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, 7539 "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 7540#endif 7541 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7542 if (pCB) { 7543 skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()"); 7544 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer"); 7545 } 7546 lock.unlock(); 7547 if (!skipCall) 7548 dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions); 7549} 7550 7551static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers, 7552 VkImageLayout srcImageLayout) { 7553 bool skip_call = false; 7554 7555 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 7556 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 7557 for (uint32_t i = 0; i < subLayers.layerCount; ++i) { 7558 uint32_t layer = i + subLayers.baseArrayLayer; 7559 VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer}; 7560 IMAGE_CMD_BUF_LAYOUT_NODE node; 7561 if (!FindLayout(pCB, srcImage, sub, node)) { 7562 
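// No layout recorded yet for this subresource in this command buffer; seed the tracker with the
// app-supplied layout so subsequent commands can be checked against it.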
SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout)); 7563 continue; 7564 } 7565 if (node.layout != srcImageLayout) { 7567 skip_call |= 7568 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 7569 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s " 7570 "when its current layout is %s.", 7571 string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout)); 7572 } 7573 } 7574 if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) { 7575 if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) { 7576 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning. 7577 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 7578 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 7579 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL."); 7580 } else { 7581 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7582 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be " 7583 "TRANSFER_SRC_OPTIMAL or GENERAL.", 7584 string_VkImageLayout(srcImageLayout)); 7585 } 7586 } 7587 return skip_call; 7588} 7589 7590static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers, 7591 VkImageLayout destImageLayout) { 7592 bool skip_call = false; 7593 7594 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 7595 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 7596 for (uint32_t i = 0; i < subLayers.layerCount; ++i) { 7597 uint32_t layer = i + subLayers.baseArrayLayer; 7598 VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer}; 7599 IMAGE_CMD_BUF_LAYOUT_NODE node; 7600 if (!FindLayout(pCB, destImage, sub, node)) { 7601 SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout)); 7602 continue; 7603 } 7604 if (node.layout != destImageLayout) { 7605 skip_call |= 7606 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 7607 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s when " 7608 "its current layout is %s.", 7609 string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout)); 7610 } 7611 } 7612 if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) { 7613 if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) { 7614 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
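// (Same policy as the source-image check above: GENERAL is legal per the spec, just typically slower
// than the dedicated transfer layout.)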
7615 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 7616 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 7617 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL."); 7618 } else { 7619 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7620 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be " 7621 "TRANSFER_DST_OPTIMAL or GENERAL.", 7622 string_VkImageLayout(destImageLayout)); 7623 } 7624 } 7625 return skip_call; 7626} 7627 7628VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7629vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, 7630 VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) { 7631 bool skipCall = false; 7632 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7633 std::unique_lock<std::mutex> lock(global_lock); 7634#if MTMERGESOURCE 7635 VkDeviceMemory mem; 7636 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7637 // Validate that src & dst images have correct usage flags set 7638 skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7639 if (cb_data != dev_data->commandBufferMap.end()) { 7640 std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); }; 7641 cb_data->second->validate_functions.push_back(function); 7642 } 7643 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage"); 7644 skipCall |= 7645 get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7646 if (cb_data != dev_data->commandBufferMap.end()) { 7647 std::function<bool()> function = [=]() { 7648 set_memory_valid(dev_data, mem, true, dstImage); 7649 return false; 7650 }; 7651 cb_data->second->validate_functions.push_back(function); 7652 } 7653 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage"); 7654 skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, 7655 "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"); 7656 skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, 7657 "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT"); 7658#endif 7659 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7660 if (pCB) { 7661 skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()"); 7662 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage"); 7663 for (uint32_t i = 0; i < regionCount; ++i) { 7664 skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout); 7665 skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout); 7666 } 7667 } 7668 lock.unlock(); 7669 if (!skipCall) 7670 dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, 7671 regionCount, pRegions); 7672} 7673 7674VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7675vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, 7676 VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) { 7677 bool skipCall = false; 7678 layer_data *dev_data = 
get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7679 std::unique_lock<std::mutex> lock(global_lock); 7680#if MTMERGESOURCE 7681 VkDeviceMemory mem; 7682 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7683 // Validate that src & dst images have correct usage flags set 7684 skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7685 if (cb_data != dev_data->commandBufferMap.end()) { 7686 std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); }; 7687 cb_data->second->validate_functions.push_back(function); 7688 } 7689 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage"); 7690 skipCall |= 7691 get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7692 if (cb_data != dev_data->commandBufferMap.end()) { 7693 std::function<bool()> function = [=]() { 7694 set_memory_valid(dev_data, mem, true, dstImage); 7695 return false; 7696 }; 7697 cb_data->second->validate_functions.push_back(function); 7698 } 7699 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage"); 7700 skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, 7701 "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"); 7702 skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, 7703 "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT"); 7704#endif 7705 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7706 if (pCB) { 7707 skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()"); 7708 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage"); 7709 } 7710 lock.unlock(); 7711 if (!skipCall) 7712 dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, 7713 regionCount, pRegions, filter); 7714} 7715 7716VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, 7717 VkImage dstImage, VkImageLayout dstImageLayout, 7718 uint32_t regionCount, const VkBufferImageCopy *pRegions) { 7719 bool skipCall = false; 7720 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7721 std::unique_lock<std::mutex> lock(global_lock); 7722#if MTMERGESOURCE 7723 VkDeviceMemory mem; 7724 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7725 skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7726 if (cb_data != dev_data->commandBufferMap.end()) { 7727 std::function<bool()> function = [=]() { 7728 set_memory_valid(dev_data, mem, true, dstImage); 7729 return false; 7730 }; 7731 cb_data->second->validate_functions.push_back(function); 7732 } 7733 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage"); 7734 skipCall |= 7735 get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7736 if (cb_data != dev_data->commandBufferMap.end()) { 7737 std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); }; 7738 cb_data->second->validate_functions.push_back(function); 7739 } 7740 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage"); 7741 // Validate that src buff & 
dst image have correct usage flags set 7742 skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, 7743 "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT"); 7744 skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, 7745 "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT"); 7746#endif 7747 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7748 if (pCB) { 7749 skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()"); 7750 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage"); 7751 for (uint32_t i = 0; i < regionCount; ++i) { 7752 skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout); 7753 } 7754 } 7755 lock.unlock(); 7756 if (!skipCall) 7757 dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, 7758 pRegions); 7759} 7760 7761VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, 7762 VkImageLayout srcImageLayout, VkBuffer dstBuffer, 7763 uint32_t regionCount, const VkBufferImageCopy *pRegions) { 7764 bool skipCall = false; 7765 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7766 std::unique_lock<std::mutex> lock(global_lock); 7767#if MTMERGESOURCE 7768 VkDeviceMemory mem; 7769 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7770 skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7771 if (cb_data != dev_data->commandBufferMap.end()) { 7772 std::function<bool()> function = [=]() { 7773 return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); 7774 }; 7775 cb_data->second->validate_functions.push_back(function); 7776 } 7777 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer"); 7778 skipCall |= 7779 get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7780 if (cb_data != dev_data->commandBufferMap.end()) { 7781 std::function<bool()> function = [=]() { 7782 set_memory_valid(dev_data, mem, true); 7783 return false; 7784 }; 7785 cb_data->second->validate_functions.push_back(function); 7786 } 7787 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer"); 7788 // Validate that dst buff & src image have correct usage flags set 7789 skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, 7790 "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"); 7791 skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, 7792 "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 7793#endif 7794 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7795 if (pCB) { 7796 skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()"); 7797 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer"); 7798 for (uint32_t i = 0; i < regionCount; ++i) { 7799 skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout); 7800 } 7801 } 7802 lock.unlock(); 7803 if (!skipCall) 7804 dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, 7805 pRegions); 7806} 
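// Note: each transfer entry point above follows the same three-step MTMERGESOURCE pattern:
//   (1) resolve the memory binding for every buffer/image operand,
//   (2) queue a deferred callback on the command buffer -- validate_memory_is_valid() for resources
//       that are read, or set_memory_valid(..., true) for resources that are written -- so memory
//       validity is checked or established at submit time, and
//   (3) immediately validate the operand's TRANSFER_SRC/TRANSFER_DST usage flags.
// The buffer-write case, as used by vkCmdUpdateBuffer()/vkCmdFillBuffer() below, is simply:
//   std::function<bool()> function = [=]() { set_memory_valid(dev_data, mem, true); return false; };
//   cb_data->second->validate_functions.push_back(function);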
7807 7808 VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, 7809 VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) { 7810 bool skipCall = false; 7811 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7812 std::unique_lock<std::mutex> lock(global_lock); 7813#if MTMERGESOURCE 7814 VkDeviceMemory mem; 7815 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7816 skipCall = 7817 get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7818 if (cb_data != dev_data->commandBufferMap.end()) { 7819 std::function<bool()> function = [=]() { 7820 set_memory_valid(dev_data, mem, true); 7821 return false; 7822 }; 7823 cb_data->second->validate_functions.push_back(function); 7824 } 7825 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer"); 7826 // Validate that dst buff has correct usage flags set 7827 skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, 7828 "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 7829#endif 7830 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7831 if (pCB) { 7832 skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()"); 7833 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer"); 7834 } 7835 lock.unlock(); 7836 if (!skipCall) 7837 dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData); 7838} 7839 7840VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7841vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) { 7842 bool skipCall = false; 7843 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7844 std::unique_lock<std::mutex> lock(global_lock); 7845#if MTMERGESOURCE 7846 VkDeviceMemory mem; 7847 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7848 skipCall = 7849 get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7850 if (cb_data != dev_data->commandBufferMap.end()) { 7851 std::function<bool()> function = [=]() { 7852 set_memory_valid(dev_data, mem, true); 7853 return false; 7854 }; 7855 cb_data->second->validate_functions.push_back(function); 7856 } 7857 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer"); 7858 // Validate that dst buff has correct usage flags set 7859 skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, 7860 "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 7861#endif 7862 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7863 if (pCB) { 7864 skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()"); 7865 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer"); 7866 } 7867 lock.unlock(); 7868 if (!skipCall) 7869 dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data); 7870} 7871 7872VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, 7873 const VkClearAttachment *pAttachments, uint32_t rectCount, 7874 const VkClearRect *pRects) { 7875 bool skipCall = false; 7876 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7877 std::unique_lock<std::mutex>
lock(global_lock); 7878 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7879 if (pCB) { 7880 skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()"); 7881 // Warn if this is issued prior to Draw Cmd and clearing the entire attachment 7882 if (!hasDrawCmd(pCB) && (rectCount > 0) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) && 7883 (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) { 7884 // TODO : commandBuffer should be srcObj 7885 // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass) 7886 // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must 7887 // call CmdClearAttachments 7888 // Otherwise this seems more like a performance warning. 7889 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 7890 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS", 7891 "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds." 7892 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.", 7893 (uint64_t)(commandBuffer)); 7894 } 7895 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments"); 7896 } 7897 7898 // Validate that attachment is in reference list of active subpass 7899 if (pCB && pCB->activeRenderPass) { 7900 const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo; 7901 const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass]; 7902 7903 for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) { 7904 const VkClearAttachment *attachment = &pAttachments[attachment_idx]; 7905 if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) { 7906 bool found = false; 7907 for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) { 7908 if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) { 7909 found = true; 7910 break; 7911 } 7912 } 7913 if (!found) { 7914 skipCall |= log_msg( 7915 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7916 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS", 7917 "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d", 7918 attachment->colorAttachment, pCB->activeSubpass); 7919 } 7920 } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { 7921 if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass 7922 (pSD->pDepthStencilAttachment->attachment == 7923 VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass 7924 7925 skipCall |= log_msg( 7926 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 7927 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS", 7928 "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found " 7929 "in active subpass %d", 7930 attachment->colorAttachment, 7931 (pSD->pDepthStencilAttachment) ?
pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED, 7932 pCB->activeSubpass); 7933 } 7934 } 7935 } 7936 } 7937 lock.unlock(); 7938 if (!skipCall) 7939 dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects); 7940} 7941 7942VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, 7943 VkImageLayout imageLayout, const VkClearColorValue *pColor, 7944 uint32_t rangeCount, const VkImageSubresourceRange *pRanges) { 7945 bool skipCall = false; 7946 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7947 std::unique_lock<std::mutex> lock(global_lock); 7948#if MTMERGESOURCE 7949 // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state 7950 VkDeviceMemory mem; 7951 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7952 skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7953 if (cb_data != dev_data->commandBufferMap.end()) { 7954 std::function<bool()> function = [=]() { 7955 set_memory_valid(dev_data, mem, true, image); 7956 return false; 7957 }; 7958 cb_data->second->validate_functions.push_back(function); 7959 } 7960 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage"); 7961#endif 7962 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7963 if (pCB) { 7964 skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()"); 7965 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage"); 7966 } 7967 lock.unlock(); 7968 if (!skipCall) 7969 dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges); 7970} 7971 7972VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7973vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, 7974 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount, 7975 const VkImageSubresourceRange *pRanges) { 7976 bool skipCall = false; 7977 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7978 std::unique_lock<std::mutex> lock(global_lock); 7979#if MTMERGESOURCE 7980 // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state 7981 VkDeviceMemory mem; 7982 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7983 skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7984 if (cb_data != dev_data->commandBufferMap.end()) { 7985 std::function<bool()> function = [=]() { 7986 set_memory_valid(dev_data, mem, true, image); 7987 return false; 7988 }; 7989 cb_data->second->validate_functions.push_back(function); 7990 } 7991 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage"); 7992#endif 7993 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7994 if (pCB) { 7995 skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()"); 7996 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage"); 7997 } 7998 lock.unlock(); 7999 if (!skipCall) 8000 dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, 8001 pRanges); 8002} 8003 8004VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8005vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, 8006 VkImageLayout 
dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) { 8007 bool skipCall = false; 8008 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8009 std::unique_lock<std::mutex> lock(global_lock); 8010#if MTMERGESOURCE 8011 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 8012 VkDeviceMemory mem; 8013 skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 8014 if (cb_data != dev_data->commandBufferMap.end()) { 8015 std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); }; 8016 cb_data->second->validate_functions.push_back(function); 8017 } 8018 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage"); 8019 skipCall |= 8020 get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 8021 if (cb_data != dev_data->commandBufferMap.end()) { 8022 std::function<bool()> function = [=]() { 8023 set_memory_valid(dev_data, mem, true, dstImage); 8024 return false; 8025 }; 8026 cb_data->second->validate_functions.push_back(function); 8027 } 8028 skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage"); 8029#endif 8030 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8031 if (pCB) { 8032 skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()"); 8033 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage"); 8034 } 8035 lock.unlock(); 8036 if (!skipCall) 8037 dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, 8038 regionCount, pRegions); 8039} 8040 8041bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { 8042 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8043 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8044 if (pCB) { 8045 pCB->eventToStageMap[event] = stageMask; 8046 } 8047 auto queue_data = dev_data->queueMap.find(queue); 8048 if (queue_data != dev_data->queueMap.end()) { 8049 queue_data->second.eventToStageMap[event] = stageMask; 8050 } 8051 return false; 8052} 8053 8054VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8055vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { 8056 bool skipCall = false; 8057 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8058 std::unique_lock<std::mutex> lock(global_lock); 8059 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8060 if (pCB) { 8061 skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()"); 8062 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent"); 8063 pCB->events.push_back(event); 8064 std::function<bool(VkQueue)> eventUpdate = 8065 std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask); 8066 pCB->eventUpdates.push_back(eventUpdate); 8067 } 8068 lock.unlock(); 8069 if (!skipCall) 8070 dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask); 8071} 8072 8073VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8074vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { 8075 bool skipCall = false; 8076 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8077 std::unique_lock<std::mutex> lock(global_lock); 8078 
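// Track the reset in this CB and queue a deferred update that clears the event's stage mask
// (stageMask 0) when the command buffer is submitted.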
GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8079 if (pCB) { 8080 skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()"); 8081 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent"); 8082 pCB->events.push_back(event); 8083 std::function<bool(VkQueue)> eventUpdate = 8084 std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0)); 8085 pCB->eventUpdates.push_back(eventUpdate); 8086 } 8087 lock.unlock(); 8088 if (!skipCall) 8089 dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask); 8090} 8091 8092static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, 8093 const VkImageMemoryBarrier *pImgMemBarriers) { 8094 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 8095 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 8096 bool skip = false; 8097 uint32_t levelCount = 0; 8098 uint32_t layerCount = 0; 8099 8100 for (uint32_t i = 0; i < memBarrierCount; ++i) { 8101 auto mem_barrier = &pImgMemBarriers[i]; 8102 if (!mem_barrier) 8103 continue; 8104 // TODO: Do not iterate over every possibility - consolidate where 8105 // possible 8106 ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image); 8107 8108 for (uint32_t j = 0; j < levelCount; j++) { 8109 uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j; 8110 for (uint32_t k = 0; k < layerCount; k++) { 8111 uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k; 8112 VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer}; 8113 IMAGE_CMD_BUF_LAYOUT_NODE node; 8114 if (!FindLayout(pCB, mem_barrier->image, sub, node)) { 8115 SetLayout(pCB, mem_barrier->image, sub, 8116 IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout)); 8117 continue; 8118 } 8119 if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) { 8120 // TODO: Set memory invalid which is in mem_tracker currently 8121 } else if (node.layout != mem_barrier->oldLayout) { 8122 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 8123 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s " 8124 "when the current layout is %s.", 8125 string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout)); 8126 } 8127 SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout); 8128 } 8129 } 8130 } 8131 return skip; 8132} 8133 8134// Print readable FlagBits in FlagMask 8135static std::string string_VkAccessFlags(VkAccessFlags accessMask) { 8136 std::string result; 8137 std::string separator; 8138 8139 if (accessMask == 0) { 8140 result = "[None]"; 8141 } else { 8142 result = "["; 8143 for (uint32_t i = 0; i < 32; i++) { 8144 if (accessMask & (1u << i)) { 8145 result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i)); 8146 separator = " | "; 8147 } 8148 } 8149 result = result + "]"; 8150 } 8151 return result; 8152} 8153 8154// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
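// (e.g. for COLOR_ATTACHMENT_OPTIMAL the required bit is COLOR_ATTACHMENT_WRITE_BIT, and
// COLOR_ATTACHMENT_READ_BIT is optional -- see ValidateMaskBitsFromLayouts below.)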
8155// If required_bit is zero, accessMask must have at least one of 'optional_bits' set 8156 // TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions 8157 static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask, 8158 const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, 8159 const char *type) { 8160 bool skip_call = false; 8161 8162 if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) { 8163 // Flag any bits set outside of the allowed (required | optional) set 8163 if (accessMask & ~(required_bit | optional_bits)) { 8164 // TODO: Verify against Valid Use 8165 skip_call |= 8166 log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8167 DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.", 8168 type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout)); 8169 } 8170 } else { 8171 if (!required_bit) { 8172 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8173 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d " 8174 "%s when layout is %s, unless the app has previously added a " 8175 "barrier for this transition.", 8176 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits, 8177 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout)); 8178 } else { 8179 std::string opt_bits; 8180 if (optional_bits != 0) { 8181 std::stringstream ss; 8182 ss << optional_bits; 8183 opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits); 8184 } 8185 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8186 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when " 8187 "layout is %s, unless the app has previously added a barrier for " 8188 "this transition.", 8189 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit, 8190 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout)); 8191 } 8192 } 8193 return skip_call; 8194} 8195 8196static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask, 8197 const VkImageLayout &layout, const char *type) { 8198 bool skip_call = false; 8199 switch (layout) { 8200 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: { 8201 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, 8202 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type); 8203 break; 8204 } 8205 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: { 8206 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, 8207 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type); 8208 break; 8209 } 8210 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: { 8211 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type); 8212 break; 8213 } 8214 case VK_IMAGE_LAYOUT_PREINITIALIZED: { 8215 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type); 8216 break; 8217 } 8218 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: { 8219 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0, 8220
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type); 8221 break; 8222 } 8223 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: { 8224 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0, 8225 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type); 8226 break; 8227 } 8228 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: { 8229 skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type); 8230 break; 8231 } 8232 case VK_IMAGE_LAYOUT_UNDEFINED: { 8233 if (accessMask != 0) { 8234 // TODO: Verify against Valid Use section spec 8235 skip_call |= 8236 log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8237 DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.", 8238 type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout)); 8239 } 8240 break; 8241 } 8242 case VK_IMAGE_LAYOUT_GENERAL: 8243 default: { break; } 8244 } 8245 return skip_call; 8246} 8247 8248static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, 8249 const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount, 8250 const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount, 8251 const VkImageMemoryBarrier *pImageMemBarriers) { 8252 bool skip_call = false; 8253 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 8254 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 8255 if (pCB->activeRenderPass && memBarrierCount) { 8256 if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) { 8257 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8258 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d " 8259 "with no self dependency specified.", 8260 funcName, pCB->activeSubpass); 8261 } 8262 } 8263 for (uint32_t i = 0; i < imageMemBarrierCount; ++i) { 8264 auto mem_barrier = &pImageMemBarriers[i]; 8265 auto image_data = dev_data->imageMap.find(mem_barrier->image); 8266 if (image_data != dev_data->imageMap.end()) { 8267 uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex; 8268 uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex; 8269 if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { 8270 // srcQueueFamilyIndex and dstQueueFamilyIndex must both 8271 // be VK_QUEUE_FAMILY_IGNORED 8272 if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) { 8273 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 8274 __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS", 8275 "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of " 8276 "VK_SHARING_MODE_CONCURRENT. Src and dst " 8277 " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.", 8278 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image)); 8279 } 8280 } else { 8281 // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. 
srcQueueFamilyIndex and 8282 // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED, 8283 // or both be a valid queue family 8284 if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) && 8285 (src_q_f_index != dst_q_f_index)) { 8286 skip_call |= 8287 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8288 DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode " 8289 "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or " 8290 "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both " 8291 "must be.", 8292 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image)); 8293 } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) && 8294 ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) || 8295 (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) { 8296 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 8297 __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS", 8298 "%s: Image 0x%" PRIx64 " was created with sharingMode " 8299 "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d" 8300 " or dstQueueFamilyIndex %d exceeds the " PRINTF_SIZE_T_SPECIFIER 8301 " queueFamilies created for this device.", 8302 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, 8303 dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size()); 8304 } 8305 } 8306 } 8307 8308 if (mem_barrier) { 8309 skip_call |= 8310 ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source"); 8311 skip_call |= 8312 ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest"); 8313 if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) { 8314 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8315 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or " 8316 "PREINITIALIZED.", 8317 funcName); 8318 } 8319 auto image_data = dev_data->imageMap.find(mem_barrier->image); 8320 VkFormat format = VK_FORMAT_UNDEFINED; 8321 uint32_t arrayLayers = 0, mipLevels = 0; 8322 bool imageFound = false; 8323 if (image_data != dev_data->imageMap.end()) { 8324 format = image_data->second.createInfo.format; 8325 arrayLayers = image_data->second.createInfo.arrayLayers; 8326 mipLevels = image_data->second.createInfo.mipLevels; 8327 imageFound = true; 8328 } else if (dev_data->device_extensions.wsi_enabled) { 8329 auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image); 8330 if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) { 8331 auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second); 8332 if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) { 8333 format = swapchain_data->second->createInfo.imageFormat; 8334 arrayLayers = swapchain_data->second->createInfo.imageArrayLayers; 8335 mipLevels = 1; 8336 imageFound = true; 8337 } 8338 } 8339 } 8340 if (imageFound) { 8341 if (vk_format_is_depth_and_stencil(format) && 8342 (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) || 8343 !(mem_barrier->subresourceRange.aspectMask &
VK_IMAGE_ASPECT_STENCIL_BIT))) { 8344 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8345 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must " 8346 "have both VK_IMAGE_ASPECT_DEPTH_BIT and " 8347 "VK_IMAGE_ASPECT_STENCIL_BIT set.", 8348 funcName); 8349 } 8350 int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) 8351 ? 1 8352 : mem_barrier->subresourceRange.layerCount; 8353 if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) { 8354 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8355 DRAWSTATE_INVALID_BARRIER, "DS", "%s: The sum of baseArrayLayer (%d) and " 8356 "layerCount (%d) must be less than or equal " 8357 "to the image's total layer count (%d).", 8358 funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount, 8359 arrayLayers); 8360 } 8361 int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS) 8362 ? 1 8363 : mem_barrier->subresourceRange.levelCount; 8364 if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) { 8365 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8366 DRAWSTATE_INVALID_BARRIER, "DS", "%s: The sum of baseMipLevel (%d) and " 8367 "levelCount (%d) must be less than or equal " 8368 "to the image's total mip level count (%d).", 8369 funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount, 8370 mipLevels); 8371 } 8372 } 8373 } 8374 } 8375 for (uint32_t i = 0; i < bufferBarrierCount; ++i) { 8376 auto mem_barrier = &pBufferMemBarriers[i]; 8377 if (pCB->activeRenderPass) { 8378 skip_call |= 8379 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8380 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName); 8381 } 8382 if (!mem_barrier) 8383 continue; 8384 8385 // Validate buffer barrier queue family indices 8386 if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED && 8387 mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) || 8388 (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED && 8389 mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) { 8390 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8391 DRAWSTATE_INVALID_QUEUE_INDEX, "DS", 8392 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater " 8393 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.", 8394 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer), 8395 dev_data->phys_dev_properties.queue_family_properties.size()); 8396 } 8397 8398 auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer); 8399 if (buffer_data != dev_data->bufferMap.end()) { // Only dereference the iterator once the buffer is known to be tracked 8400 VkDeviceSize buffer_size = (buffer_data->second.createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO) 8401 ? buffer_data->second.createInfo.size 8402 : 0; 8403 if (mem_barrier->offset >= buffer_size) { 8404 skip_call |= log_msg( 8405 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8406 DRAWSTATE_INVALID_BARRIER, "DS", 8407 "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " which is not less than total size %" PRIu64 ".", 8408 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer), 8409 reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size)); 8410 } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) { 8411 skip_call |= log_msg( 8412 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8413 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64 8414 " whose sum is greater than total size %" PRIu64 ".", 8415 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer), 8416 reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size), 8417 reinterpret_cast<const uint64_t &>(buffer_size)); 8418 } 8419 } 8420 } 8421 return skip_call; 8422} 8423 8424bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) { 8425 bool skip_call = false; 8426 VkPipelineStageFlags stageMask = 0; 8427 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 8428 for (uint32_t i = 0; i < eventCount; ++i) { 8429 auto event = pCB->events[firstEventIndex + i]; 8430 auto queue_data = dev_data->queueMap.find(queue); 8431 if (queue_data == dev_data->queueMap.end()) 8432 return false; 8433 auto event_data = queue_data->second.eventToStageMap.find(event); 8434 if (event_data != queue_data->second.eventToStageMap.end()) { 8435 stageMask |= event_data->second; 8436 } else { 8437 auto global_event_data = dev_data->eventMap.find(event); 8438 if (global_event_data == dev_data->eventMap.end()) { 8439 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, 8440 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS", 8441 "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", 8442 reinterpret_cast<const uint64_t &>(event)); 8443 } else { 8444 stageMask |= global_event_data->second.stageMask; 8445 } 8446 } 8447 } 8448 if (sourceStageMask != stageMask) { 8449 skip_call |= 8450 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8451 DRAWSTATE_INVALID_EVENT, "DS", 8452 "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x, which must be the bitwise OR of the " 8453 "stageMask parameters used in calls to vkCmdSetEvent (or VK_PIPELINE_STAGE_HOST_BIT if the event was set with vkSetEvent).", 8454 sourceStageMask); 8455 } 8456 return skip_call; 8457} 8458 8459VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8460vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, 8461 VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, 8462 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, 8463 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        auto firstEventIndex = pCB->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            pCB->waitedEvents.push_back(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
        }
        // Defer the stage-mask check to queue submit time, when the vkCmdSetEvent
        // state actually seen by the queue is known
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
                        (uint64_t)(queryPool), slot);
        } else {
            pCB->activeQueries.erase(query);
        }
        pCB->queryToStateMap[query] = true;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            pCB->queryToStateMap[query] = false;
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}
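/* Query state tracking note: queryToStateMap records one bool per
 * (pool, slot) pair -- false after vkCmdResetQueryPool, true once
 * vkCmdEndQuery or vkCmdWriteTimestamp has made the result (eventually)
 * available. vkCmdCopyQueryPoolResults below flags copies from slots that
 * are still false. */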
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
    // Validate that DST buffer has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            if (!pCB->queryToStateMap[query]) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_QUERY, "DS",
                            "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
                            (uint64_t)(queryPool), firstQuery + i);
            }
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                                                                 dstOffset, stride, flags);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                                              const void *pValues) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) {
        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
    }
    // TODO : Add warning if push constant update doesn't align with range
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->queryToStateMap[query] = true;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}
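/* Illustrative sketch (not part of the layer): with a typical
 * maxPushConstantsSize of 128 bytes, this update stays in range:
 *     struct { float mvp[16]; } pc;            // 64 bytes
 *     vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_VERTEX_BIT,
 *                        0, sizeof(pc), &pc);  // offset 0 + 64 <= 128
 * An offset/size pair whose sum exceeded the limit would be reported by
 * validatePushConstantSize() above. */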
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    if (VK_SUCCESS == result) {
        // Shadow create info and store in map
        std::lock_guard<std::mutex> lock(global_lock);

        auto &fbNode = dev_data->frameBufferMap[*pFramebuffer];
        fbNode.createInfo = *pCreateInfo;
        if (pCreateInfo->pAttachments) {
            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
            memcpy(attachments, pCreateInfo->pAttachments, pCreateInfo->attachmentCount * sizeof(VkImageView));
            fbNode.createInfo.pAttachments = attachments;
        }
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkImageView view = pCreateInfo->pAttachments[i];
            auto view_data = dev_data->imageViewMap.find(view);
            if (view_data == dev_data->imageViewMap.end()) {
                continue;
            }
            MT_FB_ATTACHMENT_INFO fb_info;
            get_mem_binding_from_object(dev_data, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        &fb_info.mem);
            fb_info.image = view_data->second.image;
            fbNode.attachments.push_back(fb_info);
        }
    }
    return result;
}

static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (processed_nodes.count(index))
        return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return true;
        }
    } else {
        return true;
    }
    return false;
}
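/* Illustrative sketch (not part of the layer): for a render pass whose
 * dependencies produce the subpass DAG 0 -> 1 -> 2 (node.prev of 2 is {1},
 * of 1 is {0}), FindDependency(2, 0, ...) walks 2 -> 1 -> 0 and returns
 * true: a transitive dependency path from subpass 0 to subpass 2 exists
 * even though no direct VkSubpassDependency between 0 and 2 was declared. */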
static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit one still might. If so, warn; if not, report an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
                                     subpass, dependent_subpasses[k]);
            } else {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}
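/* Illustrative sketch (not part of the layer): if subpass 0 writes color
 * attachment 2 and subpass 2 later reads it as an input attachment, an
 * intermediate subpass 1 that touches neither must still list it:
 *     uint32_t preserve[] = { 2 };
 *     subpasses[1].preserveAttachmentCount = 1;
 *     subpasses[1].pPreserveAttachments = preserve;
 * otherwise CheckPreserved() reports DRAWSTATE_INVALID_RENDERPASS for
 * subpass 1. */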
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Treat the ranges as half-open intervals [offset, offset + size); they
    // overlap iff each interval starts before the other one ends.
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
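/* Illustrative sketch (not part of the layer): two views of the same image,
 * one covering mip levels [0,2) and layers [0,1), the other mips [1,3) and
 * layers [0,1), overlap: 0 < 1+2 and 1 < 0+2 for the mip ranges, and the
 * layer ranges are identical, so isRegionOverlapping() returns true and
 * ValidateDependencies() below treats the two attachments as aliases. */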
static bool ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
                                 const std::vector<DAGNode> &subpass_to_node) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_data_i = my_data->imageViewMap.find(viewi);
            auto view_data_j = my_data->imageViewMap.find(viewj);
            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
                continue;
            }
            if (view_data_i->second.image == view_data_j->second.image &&
                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
                continue;
            }
            if (image_data_i->second.mem == image_data_j->second.mem &&
                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize,
                                   image_data_j->second.memOffset, image_data_j->second.memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "Attachment %d aliases attachment %d but doesn't "
                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                                     attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "Attachment %d aliases attachment %d but doesn't "
                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                                     other_attachment, attachment);
            }
        }
    }
    // For each attachment, find the subpasses that use it (VK_ATTACHMENT_UNUSED
    // is skipped since it is not a valid index into the per-attachment vectors).
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
                                     attachment, i);
            }
        }
    }
    // If there is a dependency needed make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Loop through implicit dependencies: if this pass reads, make sure the attachment is preserved for all passes after it was
    // written.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
                }
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
                }
            }
        }
        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or "
                                    "GENERAL.",
                                    string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
                }
            }
        }
    }
    return skip;
}
static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        }
        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
        }
        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}
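/* Illustrative sketch (not part of the layer): a dependency
 *     VkSubpassDependency dep = {};
 *     dep.srcSubpass = 0;
 *     dep.dstSubpass = 1;
 * becomes one DAG edge in CreatePassDAG(): subpass_to_node[1].prev gains 0
 * and subpass_to_node[0].next gains 1. VK_SUBPASS_EXTERNAL endpoints add no
 * edge, and srcSubpass == dstSubpass only sets has_self_dependency. */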
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkShaderModule *pShaderModule) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    if (!shader_is_spirv(pCreateInfo)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
    }

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
    }
    return res;
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator,
                                                                  VkRenderPass *pRenderPass) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Create DAG
    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
    // Validate
    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    if (skip_call) {
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (VK_SUCCESS == result) {
        lock.lock();
        // TODOSC : Merge in tracking of renderpass from shader_checker
        // Shadow create info and store in map
        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
        if (pCreateInfo->pAttachments) {
            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
        }
        if (pCreateInfo->pSubpasses) {
            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses,
                   localRPCI->subpassCount * sizeof(VkSubpassDescription));

            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
                const uint32_t attachmentCount = subpass->inputAttachmentCount +
                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];

                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
                subpass->pInputAttachments = attachments;
                attachments += subpass->inputAttachmentCount;

                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                subpass->pColorAttachments = attachments;
                attachments += subpass->colorAttachmentCount;

                if (subpass->pResolveAttachments) {
                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                    subpass->pResolveAttachments = attachments;
                    attachments += subpass->colorAttachmentCount;
                }

                if (subpass->pDepthStencilAttachment) {
                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
                    subpass->pDepthStencilAttachment = attachments;
                    attachments += 1;
                }

                // The preserve attachments are plain uint32_t indices, so copy only
                // preserveAttachmentCount words and alias them through the attachment
                // member of the spare VkAttachmentReference slots
                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
                subpass->pPreserveAttachments = &attachments->attachment;
            }
        }
        if (pCreateInfo->pDependencies) {
            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
        }
        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
#if MTMERGESOURCE
        // MTMTODO : Merge with code from above to eliminate duplication
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
            MT_PASS_ATTACHMENT_INFO pass_info;
            pass_info.load_op = desc.loadOp;
            pass_info.store_op = desc.storeOp;
            pass_info.attachment = i;
            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
        }
        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
            }
            for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
                uint32_t attachment = subpass.pPreserveAttachments[j];
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Preserve attachment %d must be less than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                }
            }
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment;
                if (subpass.pResolveAttachments) {
                    attachment = subpass.pResolveAttachments[j].attachment;
                    if (attachment >= pCreateInfo->attachmentCount && attachment != VK_ATTACHMENT_UNUSED) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                             0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                             "Color attachment %d must be less than the total number of attachments %d.",
                                             attachment, pCreateInfo->attachmentCount);
                        continue;
                    }
                }
                attachment = subpass.pColorAttachments[j].attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Color attachment %d must be less than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Depth stencil attachment %d must be less than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Input attachment %d must be less than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, true));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
            }
        }
#endif
        lock.unlock();
    }
    return result;
}
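/* Note on the shadow copy above: all attachment references of a subpass are
 * packed into one new[] block. For a subpass with 2 inputs, 2 colors with
 * resolves, a depth attachment, and 1 preserve index, the block looks like:
 *     [in0][in1][col0][col1][res0][res1][depth][preserve word]
 * Each pointer is advanced past the previous group, and deleteRenderPasses()
 * below frees the whole block through the first non-null group pointer. */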
// Free the renderpass shadow
static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
        return;
    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
        delete[] pRenderPassInfo->pAttachments;
        if (pRenderPassInfo->pSubpasses) {
            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in a single block, so just need to
                // find the first non-null pointer to delete it
                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
                }
            }
            delete[] pRenderPassInfo->pSubpasses;
        }
        delete[] pRenderPassInfo->pDependencies;
        delete pRenderPassInfo;
        delete (*ii).second;
    }
    my_data->renderPassMap.clear();
}
static bool VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS",
                             "You cannot start a render pass using a framebuffer "
                             "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto image_data = dev_data->imageViewMap.find(image_view);
        assert(image_data != dev_data->imageViewMap.end());
        const VkImage &image = image_data->second.image;
        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "You cannot start a render pass using attachment %i where the initial layout is %s and the "
                                "layout of the attachment at the start of the render pass is %s. The layouts must match.",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}
static void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                     const int subpass_index) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
    }
    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
    }
}

static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
                             cmd_name.c_str());
    }
    return skip_call;
}

static void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}
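/* Note: TransitionSubpassLayouts/TransitionFinalSubpassLayouts perform no
 * validation of their own -- they update the tracked per-subresource layout
 * state so that later checks (for example VerifyFramebufferAndRenderPassLayouts
 * on a subsequent vkCmdBeginRenderPass) compare against the layout each
 * attachment was implicitly moved to by the render pass, not the one it
 * started in. */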
static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y,
            pRenderPassBegin->renderArea.extent.width, pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width,
            pFramebufferInfo->height));
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
#if MTMERGESOURCE
            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (pass_data != dev_data->renderPassMap.end()) {
                RENDER_PASS_NODE *pRPNode = pass_data->second;
                pRPNode->fb = pRenderPassBegin->framebuffer;
                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<bool()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                                return false;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                            skipCall |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass),
                                        __LINE__, MEMTRACK_INVALID_LAYOUT, "MEM",
                                        "Cannot clear attachment %d with invalid first layout %d.",
                                        pRPNode->attachments[i].attachment, attachment_layout);
                        }
                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<bool()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                                return false;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<bool()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<bool()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                }
            }
#endif
            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (render_pass_data != dev_data->renderPassMap.end()) {
                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
            }
            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            pCB->activeRenderPass = pRenderPassBegin->renderPass;
            // This is a shallow copy as that is all that is needed for now
            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
            pCB->activeSubpass = 0;
            pCB->activeSubpassContents = contents;
            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
            // Connect this framebuffer to this cmdBuffer
            dev_data->frameBufferMap[pRenderPassBegin->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
        } else {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
        }
    }
    lock.unlock();
    if (!skipCall) {
        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
        if (pass_data != dev_data->renderPassMap.end()) {
            RENDER_PASS_NODE *pRPNode = pass_data->second;
            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        std::function<bool()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                            return false;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        std::function<bool()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                            return false;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                }
            }
        }
    }
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
        pCB->activeRenderPass = 0;
        pCB->activeSubpass = 0;
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
}

static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
                                        VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
                                        const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                   " that is not compatible with the current render pass %" PRIx64 ". "
                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
                   msg);
}
%s", 9525 (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach, 9526 msg); 9527} 9528 9529static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass, 9530 uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, 9531 uint32_t secondaryAttach, bool is_multi) { 9532 bool skip_call = false; 9533 auto primary_data = dev_data->renderPassMap.find(primaryPass); 9534 auto secondary_data = dev_data->renderPassMap.find(secondaryPass); 9535 if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) { 9536 primaryAttach = VK_ATTACHMENT_UNUSED; 9537 } 9538 if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) { 9539 secondaryAttach = VK_ATTACHMENT_UNUSED; 9540 } 9541 if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) { 9542 return skip_call; 9543 } 9544 if (primaryAttach == VK_ATTACHMENT_UNUSED) { 9545 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9546 secondaryAttach, "The first is unused while the second is not."); 9547 return skip_call; 9548 } 9549 if (secondaryAttach == VK_ATTACHMENT_UNUSED) { 9550 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9551 secondaryAttach, "The second is unused while the first is not."); 9552 return skip_call; 9553 } 9554 if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format != 9555 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) { 9556 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9557 secondaryAttach, "They have different formats."); 9558 } 9559 if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples != 9560 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) { 9561 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9562 secondaryAttach, "They have different samples."); 9563 } 9564 if (is_multi && 9565 primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags != 9566 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) { 9567 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9568 secondaryAttach, "They have different flags."); 9569 } 9570 return skip_call; 9571} 9572 9573static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass, 9574 VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, 9575 bool is_multi) { 9576 bool skip_call = false; 9577 auto primary_data = dev_data->renderPassMap.find(primaryPass); 9578 auto secondary_data = dev_data->renderPassMap.find(secondaryPass); 9579 const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass]; 9580 const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass]; 9581 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount); 9582 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) { 9583 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED; 9584 if (i < primary_desc.inputAttachmentCount) { 9585 primary_input_attach = 
static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                         VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass,
                                         bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
                                                     secondaryPass, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
                                                     secondaryPass, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
                                                     secondaryPass, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
    return skip_call;
}
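
// Verify that a secondary command buffer's render pass is compatible with the primary command buffer's render pass:
// identical handles are trivially compatible; otherwise both passes must be known, have the same subpass count, and
// have pairwise-compatible subpasses.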
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
    bool skip_call = false;
    // Early exit if renderPass objects are identical (and therefore compatible)
    if (primaryPass == secondaryPass)
        return skip_call;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)primaryBuffer, (uint64_t)(primaryPass));
        return skip_call;
    }
    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                             " that is not compatible with the current render pass %" PRIx64
                             ". They have a different number of subpasses.",
                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
        return skip_call;
    }
    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
        skip_call |=
            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
    }
    return skip_call;
}

static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = dev_data->renderPassMap[pCB->activeRenderPass]->fb;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
        }
        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
        if (fb_data == dev_data->frameBufferMap.end()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid framebuffer %" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
    }
    return skip_call;
}
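
// Validate query state across a primary/secondary command buffer pair: a secondary buffer executed while a pipeline
// statistics query is active must inherit all of the queried statistic bits, and it must not have started a query of a
// type that is still active in the primary buffer.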
= pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics; 9707 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) { 9708 skipCall |= log_msg( 9709 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9710 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", 9711 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p " 9712 "which has invalid active query pool %" PRIx64 ". Pipeline statistics is being queried so the command " 9713 "buffer must have all bits set on the queryPool.", 9714 reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first)); 9715 } 9716 } 9717 activeTypes.insert(queryPoolData->second.createInfo.queryType); 9718 } 9719 } 9720 for (auto queryObject : pSubCB->startedQueries) { 9721 auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool); 9722 if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) { 9723 skipCall |= 9724 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9725 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", 9726 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p " 9727 "which has invalid active query pool %" PRIx64 "of type %d but a query of that type has been started on " 9728 "secondary Cmd Buffer %p.", 9729 reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first), 9730 queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer)); 9731 } 9732 } 9733 return skipCall; 9734} 9735 9736VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 9737vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) { 9738 bool skipCall = false; 9739 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9740 std::unique_lock<std::mutex> lock(global_lock); 9741 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9742 if (pCB) { 9743 GLOBAL_CB_NODE *pSubCB = NULL; 9744 for (uint32_t i = 0; i < commandBuffersCount; i++) { 9745 pSubCB = getCBNode(dev_data, pCommandBuffers[i]); 9746 if (!pSubCB) { 9747 skipCall |= 9748 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9749 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", 9750 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.", 9751 (void *)pCommandBuffers[i], i); 9752 } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) { 9753 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9754 __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", 9755 "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers " 9756 "array. 
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                    (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if has continue
                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
                }
                // If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
                // that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution starting w/
            // being recorded
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
                            "flight and inherited queries not "
                            "supported on this device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
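
// Memory that backs an image may only be mapped while every tracked subresource of that image is in the GENERAL or
// PREINITIALIZED layout; any other layout triggers an error.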
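
// Unmapping drops the mapped-range bookkeeping recorded by vkMapMemory(); validateMemoryIsMapped() below checks that
// each flushed/invalidated range lies within the range that was actually mapped.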
Only " 9857 "GENERAL or PREINITIALIZED are supported.", 9858 string_VkImageLayout(layout)); 9859 } 9860 } 9861 } 9862 } 9863 return skip_call; 9864} 9865 9866VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 9867vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) { 9868 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9869 9870 bool skip_call = false; 9871 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 9872 std::unique_lock<std::mutex> lock(global_lock); 9873#if MTMERGESOURCE 9874 DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem); 9875 if (pMemObj) { 9876 pMemObj->valid = true; 9877 if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & 9878 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { 9879 skip_call = 9880 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9881 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM", 9882 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem); 9883 } 9884 } 9885 skip_call |= validateMemRange(dev_data, mem, offset, size); 9886 storeMemRanges(dev_data, mem, offset, size); 9887#endif 9888 skip_call |= ValidateMapImageLayouts(device, mem); 9889 lock.unlock(); 9890 9891 if (!skip_call) { 9892 result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData); 9893#if MTMERGESOURCE 9894 lock.lock(); 9895 initializeAndTrackMemory(dev_data, mem, size, ppData); 9896 lock.unlock(); 9897#endif 9898 } 9899 return result; 9900} 9901 9902#if MTMERGESOURCE 9903VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) { 9904 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9905 bool skipCall = false; 9906 9907 std::unique_lock<std::mutex> lock(global_lock); 9908 skipCall |= deleteMemRanges(my_data, mem); 9909 lock.unlock(); 9910 if (!skipCall) { 9911 my_data->device_dispatch_table->UnmapMemory(device, mem); 9912 } 9913} 9914 9915static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount, 9916 const VkMappedMemoryRange *pMemRanges) { 9917 bool skipCall = false; 9918 for (uint32_t i = 0; i < memRangeCount; ++i) { 9919 auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory); 9920 if (mem_element != my_data->memObjMap.end()) { 9921 if (mem_element->second.memRange.offset > pMemRanges[i].offset) { 9922 skipCall |= log_msg( 9923 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9924 (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 9925 "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset " 9926 "(" PRINTF_SIZE_T_SPECIFIER ").", 9927 funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset)); 9928 } 9929 if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) && 9930 ((mem_element->second.memRange.offset + mem_element->second.memRange.size) < 9931 (pMemRanges[i].offset + pMemRanges[i].size))) { 9932 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 9933 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__, 9934 MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER 9935 ") exceeds the Memory Object's upper-bound " 9936 "(" 
static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
                                                     const VkMappedMemoryRange *pMemRanges) {
    bool skipCall = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.pData) {
                VkDeviceSize size = mem_element->second.memRange.size;
                VkDeviceSize half_size = (size / 2);
                char *data = static_cast<char *>(mem_element->second.pData);
                for (VkDeviceSize j = 0; j < half_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
            }
        }
    }
    return skipCall;
}

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    lock.unlock();
    if (!skipCall) {
        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    lock.unlock();
    if (!skipCall) {
        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}
#endif
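
// Bind an image to a memory object: record the binding, check for aliasing against buffers bound to the same
// allocation, and on success cache the image's memory handle, offset, and size requirements in the image node.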
VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        // Track objects tied to memory
        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
        VkMemoryRequirements memRequirements;
        lock.unlock();
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        lock.lock();
        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        print_mem_list(dev_data);
        lock.unlock();
        if (!skipCall) {
            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
            lock.lock();
            dev_data->memObjMap[mem].image = image;
            image_node->second.mem = mem;
            image_node->second.memOffset = memoryOffset;
            image_node->second.memSize = memRequirements.size;
            lock.unlock();
        }
    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image %" PRIx64 ", has it already been deleted?",
                reinterpret_cast<const uint64_t &>(image));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_node = dev_data->eventMap.find(event);
    if (event_node != dev_data->eventMap.end()) {
        event_node->second.needsSignaled = false;
        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_node->second.in_use.load()) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                 "Cannot call vkSetEvent() on event %" PRIxLEAST64 " that is already in use by a command buffer.",
                                 reinterpret_cast<const uint64_t &>(event));
        }
    }
    lock.unlock();
    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
    for (auto queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
    if (!skip_call)
        result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}
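
// vkQueueBindSparse() validation: the fence must be unsignaled and not already in use, sparse buffer/image bindings
// are recorded in the memory tracker, and wait/signal semaphore state is updated the same way vkQueueSubmit() does.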
VKAPI_ATTR VkResult VKAPI_CALL
vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    // First verify that fence is not in use
    if (fence != VK_NULL_HANDLE) {
        dev_data->fenceMap[fence].queue = queue;
        if ((bindInfoCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                        "Fence %#" PRIx64 " is already in use by another submission.", reinterpret_cast<uint64_t &>(fence));
        }
        if (!dev_data->fenceMap[fence].needsSignaled) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                        "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted",
                        reinterpret_cast<uint64_t &>(fence));
        }
    }
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64
                                " that has no way to be signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
                                ", but that semaphore is already signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
                dev_data->semaphoreMap[semaphore].signaled = true;
            }
        }
    }
    print_mem_list(dev_data);
    lock.unlock();

    if (!skip_call)
        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);

    return result;
}
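
// Creation wrappers for semaphores and events: on success, initialize the layer's tracking node to the unsignaled,
// not-in-use state.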
swapchain_data->second->images) { 10219 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image); 10220 if (image_sub != dev_data->imageSubresourceMap.end()) { 10221 for (auto imgsubpair : image_sub->second) { 10222 auto image_item = dev_data->imageLayoutMap.find(imgsubpair); 10223 if (image_item != dev_data->imageLayoutMap.end()) { 10224 dev_data->imageLayoutMap.erase(image_item); 10225 } 10226 } 10227 dev_data->imageSubresourceMap.erase(image_sub); 10228 } 10229 skipCall = clear_object_binding(dev_data, (uint64_t)swapchain_image, 10230 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT); 10231 dev_data->imageMap.erase(swapchain_image); 10232 } 10233 } 10234 delete swapchain_data->second; 10235 dev_data->device_extensions.swapchainMap.erase(swapchain); 10236 } 10237 lock.unlock(); 10238 if (!skipCall) 10239 dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator); 10240} 10241 10242VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 10243vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) { 10244 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10245 VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages); 10246 10247 if (result == VK_SUCCESS && pSwapchainImages != NULL) { 10248 // This should never happen and is checked by param checker. 10249 if (!pCount) 10250 return result; 10251 std::lock_guard<std::mutex> lock(global_lock); 10252 const size_t count = *pCount; 10253 auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain]; 10254 if (!swapchain_node->images.empty()) { 10255 // TODO : Not sure I like the memcmp here, but it works 10256 const bool mismatch = (swapchain_node->images.size() != count || 10257 memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count)); 10258 if (mismatch) { 10259 // TODO: Verify against Valid Usage section of extension 10260 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, 10261 (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN", 10262 "vkGetSwapchainInfoKHR(%" PRIu64 10263 ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data", 10264 (uint64_t)(swapchain)); 10265 } 10266 } 10267 for (uint32_t i = 0; i < *pCount; ++i) { 10268 IMAGE_LAYOUT_NODE image_layout_node; 10269 image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED; 10270 image_layout_node.format = swapchain_node->createInfo.imageFormat; 10271 auto &image_node = dev_data->imageMap[pSwapchainImages[i]]; 10272 image_node.createInfo.mipLevels = 1; 10273 image_node.createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers; 10274 image_node.createInfo.usage = swapchain_node->createInfo.imageUsage; 10275 image_node.valid = false; 10276 image_node.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY; 10277 swapchain_node->images.push_back(pSwapchainImages[i]); 10278 ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()}; 10279 dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair); 10280 dev_data->imageLayoutMap[subpair] = image_layout_node; 10281 dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain; 10282 } 10283 } 10284 return result; 10285} 10286 10287VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { 10288 layer_data *dev_data = 
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        VkDeviceMemory mem;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s.",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    if (semaphore != VK_NULL_HANDLE &&
        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].signaled) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].signaled = true;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    lock.unlock();

    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }

    return result;
}
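
// Debug report wrappers: creation registers the callback with the layer's own logging machinery as well as the ICD,
// destruction unregisters it, and vkDebugReportMessageEXT simply forwards to the instance dispatch table.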
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                                                           VkDebugReportCallbackEXT msgCallback,
                                                                           const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}
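
// Device-level GetProcAddr: intercepted entry points are returned directly (WSI entry points only when the swapchain
// extension was enabled at device creation); anything else is forwarded down the dispatch chain.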
"vkDestroyEvent")) 10431 return (PFN_vkVoidFunction)vkDestroyEvent; 10432 if (!strcmp(funcName, "vkDestroyQueryPool")) 10433 return (PFN_vkVoidFunction)vkDestroyQueryPool; 10434 if (!strcmp(funcName, "vkDestroyBuffer")) 10435 return (PFN_vkVoidFunction)vkDestroyBuffer; 10436 if (!strcmp(funcName, "vkDestroyBufferView")) 10437 return (PFN_vkVoidFunction)vkDestroyBufferView; 10438 if (!strcmp(funcName, "vkDestroyImage")) 10439 return (PFN_vkVoidFunction)vkDestroyImage; 10440 if (!strcmp(funcName, "vkDestroyImageView")) 10441 return (PFN_vkVoidFunction)vkDestroyImageView; 10442 if (!strcmp(funcName, "vkDestroyShaderModule")) 10443 return (PFN_vkVoidFunction)vkDestroyShaderModule; 10444 if (!strcmp(funcName, "vkDestroyPipeline")) 10445 return (PFN_vkVoidFunction)vkDestroyPipeline; 10446 if (!strcmp(funcName, "vkDestroyPipelineLayout")) 10447 return (PFN_vkVoidFunction)vkDestroyPipelineLayout; 10448 if (!strcmp(funcName, "vkDestroySampler")) 10449 return (PFN_vkVoidFunction)vkDestroySampler; 10450 if (!strcmp(funcName, "vkDestroyDescriptorSetLayout")) 10451 return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout; 10452 if (!strcmp(funcName, "vkDestroyDescriptorPool")) 10453 return (PFN_vkVoidFunction)vkDestroyDescriptorPool; 10454 if (!strcmp(funcName, "vkDestroyFramebuffer")) 10455 return (PFN_vkVoidFunction)vkDestroyFramebuffer; 10456 if (!strcmp(funcName, "vkDestroyRenderPass")) 10457 return (PFN_vkVoidFunction)vkDestroyRenderPass; 10458 if (!strcmp(funcName, "vkCreateBuffer")) 10459 return (PFN_vkVoidFunction)vkCreateBuffer; 10460 if (!strcmp(funcName, "vkCreateBufferView")) 10461 return (PFN_vkVoidFunction)vkCreateBufferView; 10462 if (!strcmp(funcName, "vkCreateImage")) 10463 return (PFN_vkVoidFunction)vkCreateImage; 10464 if (!strcmp(funcName, "vkCreateImageView")) 10465 return (PFN_vkVoidFunction)vkCreateImageView; 10466 if (!strcmp(funcName, "vkCreateFence")) 10467 return (PFN_vkVoidFunction)vkCreateFence; 10468 if (!strcmp(funcName, "CreatePipelineCache")) 10469 return (PFN_vkVoidFunction)vkCreatePipelineCache; 10470 if (!strcmp(funcName, "DestroyPipelineCache")) 10471 return (PFN_vkVoidFunction)vkDestroyPipelineCache; 10472 if (!strcmp(funcName, "GetPipelineCacheData")) 10473 return (PFN_vkVoidFunction)vkGetPipelineCacheData; 10474 if (!strcmp(funcName, "MergePipelineCaches")) 10475 return (PFN_vkVoidFunction)vkMergePipelineCaches; 10476 if (!strcmp(funcName, "vkCreateGraphicsPipelines")) 10477 return (PFN_vkVoidFunction)vkCreateGraphicsPipelines; 10478 if (!strcmp(funcName, "vkCreateComputePipelines")) 10479 return (PFN_vkVoidFunction)vkCreateComputePipelines; 10480 if (!strcmp(funcName, "vkCreateSampler")) 10481 return (PFN_vkVoidFunction)vkCreateSampler; 10482 if (!strcmp(funcName, "vkCreateDescriptorSetLayout")) 10483 return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout; 10484 if (!strcmp(funcName, "vkCreatePipelineLayout")) 10485 return (PFN_vkVoidFunction)vkCreatePipelineLayout; 10486 if (!strcmp(funcName, "vkCreateDescriptorPool")) 10487 return (PFN_vkVoidFunction)vkCreateDescriptorPool; 10488 if (!strcmp(funcName, "vkResetDescriptorPool")) 10489 return (PFN_vkVoidFunction)vkResetDescriptorPool; 10490 if (!strcmp(funcName, "vkAllocateDescriptorSets")) 10491 return (PFN_vkVoidFunction)vkAllocateDescriptorSets; 10492 if (!strcmp(funcName, "vkFreeDescriptorSets")) 10493 return (PFN_vkVoidFunction)vkFreeDescriptorSets; 10494 if (!strcmp(funcName, "vkUpdateDescriptorSets")) 10495 return (PFN_vkVoidFunction)vkUpdateDescriptorSets; 10496 if (!strcmp(funcName, 
"vkCreateCommandPool")) 10497 return (PFN_vkVoidFunction)vkCreateCommandPool; 10498 if (!strcmp(funcName, "vkDestroyCommandPool")) 10499 return (PFN_vkVoidFunction)vkDestroyCommandPool; 10500 if (!strcmp(funcName, "vkResetCommandPool")) 10501 return (PFN_vkVoidFunction)vkResetCommandPool; 10502 if (!strcmp(funcName, "vkCreateQueryPool")) 10503 return (PFN_vkVoidFunction)vkCreateQueryPool; 10504 if (!strcmp(funcName, "vkAllocateCommandBuffers")) 10505 return (PFN_vkVoidFunction)vkAllocateCommandBuffers; 10506 if (!strcmp(funcName, "vkFreeCommandBuffers")) 10507 return (PFN_vkVoidFunction)vkFreeCommandBuffers; 10508 if (!strcmp(funcName, "vkBeginCommandBuffer")) 10509 return (PFN_vkVoidFunction)vkBeginCommandBuffer; 10510 if (!strcmp(funcName, "vkEndCommandBuffer")) 10511 return (PFN_vkVoidFunction)vkEndCommandBuffer; 10512 if (!strcmp(funcName, "vkResetCommandBuffer")) 10513 return (PFN_vkVoidFunction)vkResetCommandBuffer; 10514 if (!strcmp(funcName, "vkCmdBindPipeline")) 10515 return (PFN_vkVoidFunction)vkCmdBindPipeline; 10516 if (!strcmp(funcName, "vkCmdSetViewport")) 10517 return (PFN_vkVoidFunction)vkCmdSetViewport; 10518 if (!strcmp(funcName, "vkCmdSetScissor")) 10519 return (PFN_vkVoidFunction)vkCmdSetScissor; 10520 if (!strcmp(funcName, "vkCmdSetLineWidth")) 10521 return (PFN_vkVoidFunction)vkCmdSetLineWidth; 10522 if (!strcmp(funcName, "vkCmdSetDepthBias")) 10523 return (PFN_vkVoidFunction)vkCmdSetDepthBias; 10524 if (!strcmp(funcName, "vkCmdSetBlendConstants")) 10525 return (PFN_vkVoidFunction)vkCmdSetBlendConstants; 10526 if (!strcmp(funcName, "vkCmdSetDepthBounds")) 10527 return (PFN_vkVoidFunction)vkCmdSetDepthBounds; 10528 if (!strcmp(funcName, "vkCmdSetStencilCompareMask")) 10529 return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask; 10530 if (!strcmp(funcName, "vkCmdSetStencilWriteMask")) 10531 return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask; 10532 if (!strcmp(funcName, "vkCmdSetStencilReference")) 10533 return (PFN_vkVoidFunction)vkCmdSetStencilReference; 10534 if (!strcmp(funcName, "vkCmdBindDescriptorSets")) 10535 return (PFN_vkVoidFunction)vkCmdBindDescriptorSets; 10536 if (!strcmp(funcName, "vkCmdBindVertexBuffers")) 10537 return (PFN_vkVoidFunction)vkCmdBindVertexBuffers; 10538 if (!strcmp(funcName, "vkCmdBindIndexBuffer")) 10539 return (PFN_vkVoidFunction)vkCmdBindIndexBuffer; 10540 if (!strcmp(funcName, "vkCmdDraw")) 10541 return (PFN_vkVoidFunction)vkCmdDraw; 10542 if (!strcmp(funcName, "vkCmdDrawIndexed")) 10543 return (PFN_vkVoidFunction)vkCmdDrawIndexed; 10544 if (!strcmp(funcName, "vkCmdDrawIndirect")) 10545 return (PFN_vkVoidFunction)vkCmdDrawIndirect; 10546 if (!strcmp(funcName, "vkCmdDrawIndexedIndirect")) 10547 return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect; 10548 if (!strcmp(funcName, "vkCmdDispatch")) 10549 return (PFN_vkVoidFunction)vkCmdDispatch; 10550 if (!strcmp(funcName, "vkCmdDispatchIndirect")) 10551 return (PFN_vkVoidFunction)vkCmdDispatchIndirect; 10552 if (!strcmp(funcName, "vkCmdCopyBuffer")) 10553 return (PFN_vkVoidFunction)vkCmdCopyBuffer; 10554 if (!strcmp(funcName, "vkCmdCopyImage")) 10555 return (PFN_vkVoidFunction)vkCmdCopyImage; 10556 if (!strcmp(funcName, "vkCmdBlitImage")) 10557 return (PFN_vkVoidFunction)vkCmdBlitImage; 10558 if (!strcmp(funcName, "vkCmdCopyBufferToImage")) 10559 return (PFN_vkVoidFunction)vkCmdCopyBufferToImage; 10560 if (!strcmp(funcName, "vkCmdCopyImageToBuffer")) 10561 return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer; 10562 if (!strcmp(funcName, "vkCmdUpdateBuffer")) 10563 return 
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;

    if (dev == NULL)
        return NULL;

    layer_data *dev_data;
    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    {
        if (pTable->GetDeviceProcAddr == NULL)
            return NULL;
        return pTable->GetDeviceProcAddr(dev, funcName);
    }
}
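
// Instance-level GetProcAddr: handles the instance-scope entry points intercepted by this layer, then defers to the
// debug report helpers and finally to the next layer's GetInstanceProcAddr.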
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;

    if (instance == NULL)
        return NULL;

    PFN_vkVoidFunction fptr;

    layer_data *my_data;
    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}