core_validation.cpp revision 6ac48f04d8c4853d0fe84cbcd41f3ff4b6346672
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

// Bring the std names that are used unqualified below (list, string, unique_ptr, vector)
// into scope alongside the two declarations the file already relied on.
using std::list;
using std::string;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;

// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;
// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues; // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {}
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}
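// Illustrative lookup pattern (a sketch, not part of this file's logic): dispatchable
// handles such as VkDevice key into layer_data_map via their dispatch key, using
// get_dispatch_key() and get_my_data_ptr() from the layer utility headers included above:
//   layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
//   dev_data->device_dispatch_table-><downchain entry point>(...);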
// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
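// Usage sketch (illustrative only): each SPIR-V instruction packs its word count into the
// high 16 bits of word 0 and its opcode into the low 16 bits, so advancing by len() steps
// instruction-to-instruction. Counting variables in a module, using the begin()/end()
// exposed by shader_module below, looks like:
//   unsigned n_vars = 0;
//   for (auto insn : *module)
//       if (insn.opcode() == spv::OpVariable)
//           ++n_vars;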
struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageMap.find(VkImage(handle));
        if (it != my_data->imageMap.end())
            return &(*it).second.mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferMap.find(VkBuffer(handle));
        if (it != my_data->bufferMap.end())
            return &(*it).second.mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}
// MTMERGESOURCE - end section
#endif

template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
// Helper function to validate correct usage bits set for buffers or images
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                           " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                       char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        skipCall = validate_usage_flags(dev_data, image_node->second.createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}
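// Illustrative call site (a sketch of the shape used by the command hooks later in this
// file): a transfer source image must have been created with TRANSFER_SRC usage, so a
// copy hook would do something like:
//   skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
//                                          "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");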
// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                        char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const buffer_node = dev_data->bufferMap.find(buffer);
    if (buffer_node != dev_data->bufferMap.end()) {
        skipCall = validate_usage_flags(dev_data, buffer_node->second.createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO: Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end() && !image_node->second.valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end()) {
            image_node->second.valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}
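// Typical pairing (a sketch; actual hooks appear later in the file): commands that write
// memory mark it valid, while commands that read it check validity first, e.g.:
//   set_memory_valid(dev_data, dst_mem, true);                            // in a write path
//   skipCall |= validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBuffer()"); // in a read path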
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}

// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if ((pMemObjInfo->commandBufferBindings.size()) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->commandBufferBindings.size() > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->objBindings.size() > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}
static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            // TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}
// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applied to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                            ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            // Note: arguments are ordered to match the format string (api name first, then object type)
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, *pMemBinding);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    pMemInfo->objBindings.insert({handle, type});
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        auto const image_node = dev_data->imageMap.find(VkImage(handle));
                        if (image_node != dev_data->imageMap.end()) {
                            VkImageCreateInfo ici = image_node->second.createInfo;
                            if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                                // TODO:: More memory state transition stuff.
                            }
                        }
                    }
                    *pMemBinding = mem;
                }
            }
        }
    }
    return skipCall;
}
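// Illustrative call shape (a sketch of how the Bind*Memory hooks later in this file use
// this helper):
//   skipCall |= set_mem_binding(dev_data, mem, (uint64_t)image,
//                               VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");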
// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
// IF a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Like the other helpers here, returns true if an error was logged ("skip call"), not a success flag.
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list", apiName, handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
            if (pInfo) {
                pInfo->objBindings.insert({handle, type});
                // Need to set mem binding for this object
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                        const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}
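// Usage sketch (illustrative; validation hooks that need an object's backing memory
// resolve it like this before checking the memory's state):
//   VkDeviceMemory mem;
//   skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
//                                           VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);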
// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", " ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", " Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", " Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO): ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", " Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", " Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", " VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->objBindings.size());
        if (pInfo->objBindings.size() > 0) {
            for (auto obj : pInfo->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", " VK OBJECT %" PRIu64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                " VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->commandBufferBindings.size());
        if (pInfo->commandBufferBindings.size() > 0) {
            for (auto cb : pInfo->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", " VK CB %p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", " CB Info (%p) has CB %p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", " Mem obj %" PRIu64, (uint64_t)obj);
        }
    }
}

#endif
"CMD_BINDPIPELINE"; 748 case CMD_BINDPIPELINEDELTA: 749 return "CMD_BINDPIPELINEDELTA"; 750 case CMD_SETVIEWPORTSTATE: 751 return "CMD_SETVIEWPORTSTATE"; 752 case CMD_SETLINEWIDTHSTATE: 753 return "CMD_SETLINEWIDTHSTATE"; 754 case CMD_SETDEPTHBIASSTATE: 755 return "CMD_SETDEPTHBIASSTATE"; 756 case CMD_SETBLENDSTATE: 757 return "CMD_SETBLENDSTATE"; 758 case CMD_SETDEPTHBOUNDSSTATE: 759 return "CMD_SETDEPTHBOUNDSSTATE"; 760 case CMD_SETSTENCILREADMASKSTATE: 761 return "CMD_SETSTENCILREADMASKSTATE"; 762 case CMD_SETSTENCILWRITEMASKSTATE: 763 return "CMD_SETSTENCILWRITEMASKSTATE"; 764 case CMD_SETSTENCILREFERENCESTATE: 765 return "CMD_SETSTENCILREFERENCESTATE"; 766 case CMD_BINDDESCRIPTORSETS: 767 return "CMD_BINDDESCRIPTORSETS"; 768 case CMD_BINDINDEXBUFFER: 769 return "CMD_BINDINDEXBUFFER"; 770 case CMD_BINDVERTEXBUFFER: 771 return "CMD_BINDVERTEXBUFFER"; 772 case CMD_DRAW: 773 return "CMD_DRAW"; 774 case CMD_DRAWINDEXED: 775 return "CMD_DRAWINDEXED"; 776 case CMD_DRAWINDIRECT: 777 return "CMD_DRAWINDIRECT"; 778 case CMD_DRAWINDEXEDINDIRECT: 779 return "CMD_DRAWINDEXEDINDIRECT"; 780 case CMD_DISPATCH: 781 return "CMD_DISPATCH"; 782 case CMD_DISPATCHINDIRECT: 783 return "CMD_DISPATCHINDIRECT"; 784 case CMD_COPYBUFFER: 785 return "CMD_COPYBUFFER"; 786 case CMD_COPYIMAGE: 787 return "CMD_COPYIMAGE"; 788 case CMD_BLITIMAGE: 789 return "CMD_BLITIMAGE"; 790 case CMD_COPYBUFFERTOIMAGE: 791 return "CMD_COPYBUFFERTOIMAGE"; 792 case CMD_COPYIMAGETOBUFFER: 793 return "CMD_COPYIMAGETOBUFFER"; 794 case CMD_CLONEIMAGEDATA: 795 return "CMD_CLONEIMAGEDATA"; 796 case CMD_UPDATEBUFFER: 797 return "CMD_UPDATEBUFFER"; 798 case CMD_FILLBUFFER: 799 return "CMD_FILLBUFFER"; 800 case CMD_CLEARCOLORIMAGE: 801 return "CMD_CLEARCOLORIMAGE"; 802 case CMD_CLEARATTACHMENTS: 803 return "CMD_CLEARCOLORATTACHMENT"; 804 case CMD_CLEARDEPTHSTENCILIMAGE: 805 return "CMD_CLEARDEPTHSTENCILIMAGE"; 806 case CMD_RESOLVEIMAGE: 807 return "CMD_RESOLVEIMAGE"; 808 case CMD_SETEVENT: 809 return "CMD_SETEVENT"; 810 case CMD_RESETEVENT: 811 return "CMD_RESETEVENT"; 812 case CMD_WAITEVENTS: 813 return "CMD_WAITEVENTS"; 814 case CMD_PIPELINEBARRIER: 815 return "CMD_PIPELINEBARRIER"; 816 case CMD_BEGINQUERY: 817 return "CMD_BEGINQUERY"; 818 case CMD_ENDQUERY: 819 return "CMD_ENDQUERY"; 820 case CMD_RESETQUERYPOOL: 821 return "CMD_RESETQUERYPOOL"; 822 case CMD_COPYQUERYPOOLRESULTS: 823 return "CMD_COPYQUERYPOOLRESULTS"; 824 case CMD_WRITETIMESTAMP: 825 return "CMD_WRITETIMESTAMP"; 826 case CMD_INITATOMICCOUNTERS: 827 return "CMD_INITATOMICCOUNTERS"; 828 case CMD_LOADATOMICCOUNTERS: 829 return "CMD_LOADATOMICCOUNTERS"; 830 case CMD_SAVEATOMICCOUNTERS: 831 return "CMD_SAVEATOMICCOUNTERS"; 832 case CMD_BEGINRENDERPASS: 833 return "CMD_BEGINRENDERPASS"; 834 case CMD_ENDRENDERPASS: 835 return "CMD_ENDRENDERPASS"; 836 default: 837 return "UNKNOWN"; 838 } 839} 840 841// SPIRV utility functions 842static void build_def_index(shader_module *module) { 843 for (auto insn : *module) { 844 switch (insn.opcode()) { 845 /* Types */ 846 case spv::OpTypeVoid: 847 case spv::OpTypeBool: 848 case spv::OpTypeInt: 849 case spv::OpTypeFloat: 850 case spv::OpTypeVector: 851 case spv::OpTypeMatrix: 852 case spv::OpTypeImage: 853 case spv::OpTypeSampler: 854 case spv::OpTypeSampledImage: 855 case spv::OpTypeArray: 856 case spv::OpTypeRuntimeArray: 857 case spv::OpTypeStruct: 858 case spv::OpTypeOpaque: 859 case spv::OpTypePointer: 860 case spv::OpTypeFunction: 861 case spv::OpTypeEvent: 862 case spv::OpTypeDeviceEvent: 863 case spv::OpTypeReserveId: 864 case 
// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t *words = (uint32_t *)pCreateInfo->pCode;
    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);

    /* Just validate that the header makes sense. */
    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
           considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}
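// Worked example (illustrative): given the SPIR-V
//   %len = OpConstant %uint 4
//   %arr = OpTypeArray %float %len
// build_def_index recorded %len's id -> instruction offset, so get_def(len_id) lands on
// the OpConstant, and get_constant_value(src, len_id) returns word(3) == 4 -- the array length.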
static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}

static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}
static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
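// Relaxed-matching example (a sketch of the rules above): with relaxed == true, which the
// stage-interface check below passes, a producer's vec4 of float32 satisfies a consumer's
// vec2 or vec3 of float32 (the OpTypeVector case allows the producer's component count to
// be >= the consumer's for narrow numeric element types), and a vector output can even
// feed a scalar input via the early OpTypeVector-vs-narrow-numeric recursion.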
static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}
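// Worked example (from the vector rule above): a 3-component vector of 64-bit floats
// needs (64 * 3 + 127) / 128 = 2 locations, which agrees with
// get_locations_consumed_by_format returning 2 for VK_FORMAT_R64G64B64_SFLOAT.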
typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {

        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
                                          bool is_array_of_verts) {
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;

    for (auto insn : *src) {

        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
       terminator, to fill out the rest of the word - so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
            bool is_patch = var_patch.find(id) != var_patch.end();

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupied multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = false;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }
}

static void collect_interface_by_descriptor_slot(layer_data *my_data, shader_module const *src,
                                                 std::unordered_set<uint32_t> const &accessible_ids,
                                                 std::map<descriptor_slot_t, interface_var> &out) {

    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
         * DecorationDescriptorSet and DecorationBinding.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            auto existing_it = out.find(std::make_pair(set, binding));
            if (existing_it != out.end()) {
                /* conflict within spv image */
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
                        existing_it->first.second);
            }

            interface_var v;
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            v.offset = 0;
            v.is_patch = false;
            v.is_block_member = false;
            out[std::make_pair(set, binding)] = v;
        }
    }
}
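// Example (illustrative): a GLSL declaration such as
//   layout(set = 1, binding = 3) uniform sampler2D tex;
// decorates its OpVariable with DescriptorSet 1 and Binding 3, so the variable lands in
// `out` under the key std::make_pair(1u, 3u); variables missing either decoration
// default to slot (0,0) via value_or_default.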
!= inputs.end())) { 1448 bool a_at_end = outputs.size() == 0 || a_it == outputs.end(); 1449 bool b_at_end = inputs.size() == 0 || b_it == inputs.end(); 1450 auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first; 1451 auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first; 1452 1453 if (b_at_end || ((!a_at_end) && (a_first < b_first))) { 1454 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1455 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1456 "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first, 1457 a_first.second, consumer_stage->name)) { 1458 pass = false; 1459 } 1460 a_it++; 1461 } else if (a_at_end || a_first > b_first) { 1462 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1463 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", 1464 "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second, 1465 producer_stage->name)) { 1466 pass = false; 1467 } 1468 b_it++; 1469 } else { 1470 // subtleties of arrayed interfaces: 1471 // - if is_patch, then the member is not arrayed, even though the interface may be. 1472 // - if is_block_member, then the extra array level of an arrayed interface is not 1473 // expressed in the member type -- it's expressed in the block type. 1474 if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, 1475 producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member, 1476 consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, 1477 true)) { 1478 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1479 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'", 1480 a_first.first, a_first.second, 1481 describe_type(producer, a_it->second.type_id).c_str(), 1482 describe_type(consumer, b_it->second.type_id).c_str())) { 1483 pass = false; 1484 } 1485 } 1486 if (a_it->second.is_patch != b_it->second.is_patch) { 1487 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0, 1488 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1489 "Decoration mismatch on location %u.%u: is per-%s in %s stage but " 1490 "per-%s in %s stage", a_first.first, a_first.second, 1491 a_it->second.is_patch ? "patch" : "vertex", producer_stage->name, 1492 b_it->second.is_patch ? 
"patch" : "vertex", consumer_stage->name)) { 1493 pass = false; 1494 } 1495 } 1496 a_it++; 1497 b_it++; 1498 } 1499 } 1500 1501 return pass; 1502} 1503 1504enum FORMAT_TYPE { 1505 FORMAT_TYPE_UNDEFINED, 1506 FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */ 1507 FORMAT_TYPE_SINT, 1508 FORMAT_TYPE_UINT, 1509}; 1510 1511static unsigned get_format_type(VkFormat fmt) { 1512 switch (fmt) { 1513 case VK_FORMAT_UNDEFINED: 1514 return FORMAT_TYPE_UNDEFINED; 1515 case VK_FORMAT_R8_SINT: 1516 case VK_FORMAT_R8G8_SINT: 1517 case VK_FORMAT_R8G8B8_SINT: 1518 case VK_FORMAT_R8G8B8A8_SINT: 1519 case VK_FORMAT_R16_SINT: 1520 case VK_FORMAT_R16G16_SINT: 1521 case VK_FORMAT_R16G16B16_SINT: 1522 case VK_FORMAT_R16G16B16A16_SINT: 1523 case VK_FORMAT_R32_SINT: 1524 case VK_FORMAT_R32G32_SINT: 1525 case VK_FORMAT_R32G32B32_SINT: 1526 case VK_FORMAT_R32G32B32A32_SINT: 1527 case VK_FORMAT_R64_SINT: 1528 case VK_FORMAT_R64G64_SINT: 1529 case VK_FORMAT_R64G64B64_SINT: 1530 case VK_FORMAT_R64G64B64A64_SINT: 1531 case VK_FORMAT_B8G8R8_SINT: 1532 case VK_FORMAT_B8G8R8A8_SINT: 1533 case VK_FORMAT_A8B8G8R8_SINT_PACK32: 1534 case VK_FORMAT_A2B10G10R10_SINT_PACK32: 1535 case VK_FORMAT_A2R10G10B10_SINT_PACK32: 1536 return FORMAT_TYPE_SINT; 1537 case VK_FORMAT_R8_UINT: 1538 case VK_FORMAT_R8G8_UINT: 1539 case VK_FORMAT_R8G8B8_UINT: 1540 case VK_FORMAT_R8G8B8A8_UINT: 1541 case VK_FORMAT_R16_UINT: 1542 case VK_FORMAT_R16G16_UINT: 1543 case VK_FORMAT_R16G16B16_UINT: 1544 case VK_FORMAT_R16G16B16A16_UINT: 1545 case VK_FORMAT_R32_UINT: 1546 case VK_FORMAT_R32G32_UINT: 1547 case VK_FORMAT_R32G32B32_UINT: 1548 case VK_FORMAT_R32G32B32A32_UINT: 1549 case VK_FORMAT_R64_UINT: 1550 case VK_FORMAT_R64G64_UINT: 1551 case VK_FORMAT_R64G64B64_UINT: 1552 case VK_FORMAT_R64G64B64A64_UINT: 1553 case VK_FORMAT_B8G8R8_UINT: 1554 case VK_FORMAT_B8G8R8A8_UINT: 1555 case VK_FORMAT_A8B8G8R8_UINT_PACK32: 1556 case VK_FORMAT_A2B10G10R10_UINT_PACK32: 1557 case VK_FORMAT_A2R10G10B10_UINT_PACK32: 1558 return FORMAT_TYPE_UINT; 1559 default: 1560 return FORMAT_TYPE_FLOAT; 1561 } 1562} 1563 1564/* characterizes a SPIR-V type appearing in an interface to a FF stage, 1565 * for comparison to a VkFormat's characterization above. */ 1566static unsigned get_fundamental_type(shader_module const *src, unsigned type) { 1567 auto insn = src->get_def(type); 1568 assert(insn != src->end()); 1569 1570 switch (insn.opcode()) { 1571 case spv::OpTypeInt: 1572 return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT; 1573 case spv::OpTypeFloat: 1574 return FORMAT_TYPE_FLOAT; 1575 case spv::OpTypeVector: 1576 return get_fundamental_type(src, insn.word(2)); 1577 case spv::OpTypeMatrix: 1578 return get_fundamental_type(src, insn.word(2)); 1579 case spv::OpTypeArray: 1580 return get_fundamental_type(src, insn.word(2)); 1581 case spv::OpTypePointer: 1582 return get_fundamental_type(src, insn.word(3)); 1583 default: 1584 return FORMAT_TYPE_UNDEFINED; 1585 } 1586} 1587 1588static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) { 1589 uint32_t bit_pos = u_ffs(stage); 1590 return bit_pos - 1; 1591} 1592 1593static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) { 1594 /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer. 1595 * each binding should be specified only once. 
1596 */ 1597 std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings; 1598 bool pass = true; 1599 1600 for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) { 1601 auto desc = &vi->pVertexBindingDescriptions[i]; 1602 auto &binding = bindings[desc->binding]; 1603 if (binding) { 1604 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1605 __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC", 1606 "Duplicate vertex input binding descriptions for binding %d", desc->binding)) { 1607 pass = false; 1608 } 1609 } else { 1610 binding = desc; 1611 } 1612 } 1613 1614 return pass; 1615} 1616 1617static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi, 1618 shader_module const *vs, spirv_inst_iter entrypoint) { 1619 std::map<location_t, interface_var> inputs; 1620 bool pass = true; 1621 1622 collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false); 1623 1624 /* Build index by location */ 1625 std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs; 1626 if (vi) { 1627 for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) { 1628 auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format); 1629 for (auto j = 0u; j < num_locations; j++) { 1630 attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i]; 1631 } 1632 } 1633 } 1634 1635 auto it_a = attribs.begin(); 1636 auto it_b = inputs.begin(); 1637 1638 while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) { 1639 bool a_at_end = attribs.size() == 0 || it_a == attribs.end(); 1640 bool b_at_end = inputs.size() == 0 || it_b == inputs.end(); 1641 auto a_first = a_at_end ? 0 : it_a->first; 1642 auto b_first = b_at_end ? 0 : it_b->first.first; 1643 if (!a_at_end && (b_at_end || a_first < b_first)) { 1644 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1645 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1646 "Vertex attribute at location %d not consumed by VS", a_first)) { 1647 pass = false; 1648 } 1649 it_a++; 1650 } else if (!b_at_end && (a_at_end || b_first < a_first)) { 1651 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0, 1652 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided", 1653 b_first)) { 1654 pass = false; 1655 } 1656 it_b++; 1657 } else { 1658 unsigned attrib_type = get_format_type(it_a->second->format); 1659 unsigned input_type = get_fundamental_type(vs, it_b->second.type_id); 1660 1661 /* type checking */ 1662 if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) { 1663 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1664 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1665 "Attribute type of `%s` at location %d does not match VS input type of `%s`", 1666 string_VkFormat(it_a->second->format), a_first, 1667 describe_type(vs, it_b->second.type_id).c_str())) { 1668 pass = false; 1669 } 1670 } 1671 1672 /* OK!
*/ 1673 it_a++; 1674 it_b++; 1675 } 1676 } 1677 1678 return pass; 1679} 1680 1681static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs, 1682 spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) { 1683 const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass]; 1684 std::map<location_t, interface_var> outputs; 1685 bool pass = true; 1686 1687 /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */ 1688 1689 collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false); 1690 1691 auto it = outputs.begin(); 1692 uint32_t attachment = 0; 1693 1694 /* Walk attachment list and outputs together -- this is a little overpowered since attachments 1695 * are currently dense, but the parallel with matching between shader stages is nice. 1696 */ 1697 1698 while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) { 1699 if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) { 1700 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1701 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC", 1702 "FS writes to output location %d with no matching attachment", it->first.first)) { 1703 pass = false; 1704 } 1705 it++; 1706 } else if (it == outputs.end() || it->first.first > attachment) { 1707 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1708 __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) { 1709 pass = false; 1710 } 1711 attachment++; 1712 } else { 1713 unsigned output_type = get_fundamental_type(fs, it->second.type_id); 1714 unsigned att_type = get_format_type(color_formats[attachment]); 1715 1716 /* type checking */ 1717 if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) { 1718 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1719 __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", 1720 "Attachment %d of type `%s` does not match FS output type of `%s`", attachment, 1721 string_VkFormat(color_formats[attachment]), 1722 describe_type(fs, it->second.type_id).c_str())) { 1723 pass = false; 1724 } 1725 } 1726 1727 /* OK! */ 1728 it++; 1729 attachment++; 1730 } 1731 } 1732 1733 return pass; 1734} 1735 1736/* For some analyses, we need to know about all ids referenced by the static call tree of a particular 1737 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint, 1738 * for example. 1739 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses. 1740 * - NOT the shader input/output interfaces. 1741 * 1742 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth 1743 * converting parts of this to be generated from the machine-readable spec instead. 
1744 */ 1745static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) { 1746 std::unordered_set<uint32_t> worklist; 1747 worklist.insert(entrypoint.word(2)); 1748 1749 while (!worklist.empty()) { 1750 auto id_iter = worklist.begin(); 1751 auto id = *id_iter; 1752 worklist.erase(id_iter); 1753 1754 auto insn = src->get_def(id); 1755 if (insn == src->end()) { 1756 /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble 1757 * across all kinds of things here that we may not care about. */ 1758 continue; 1759 } 1760 1761 /* try to add to the output set */ 1762 if (!ids.insert(id).second) { 1763 continue; /* if we already saw this id, we don't want to walk it again. */ 1764 } 1765 1766 switch (insn.opcode()) { 1767 case spv::OpFunction: 1768 /* scan whole body of the function, enlisting anything interesting */ 1769 while (++insn, insn.opcode() != spv::OpFunctionEnd) { 1770 switch (insn.opcode()) { 1771 case spv::OpLoad: 1772 case spv::OpAtomicLoad: 1773 case spv::OpAtomicExchange: 1774 case spv::OpAtomicCompareExchange: 1775 case spv::OpAtomicCompareExchangeWeak: 1776 case spv::OpAtomicIIncrement: 1777 case spv::OpAtomicIDecrement: 1778 case spv::OpAtomicIAdd: 1779 case spv::OpAtomicISub: 1780 case spv::OpAtomicSMin: 1781 case spv::OpAtomicUMin: 1782 case spv::OpAtomicSMax: 1783 case spv::OpAtomicUMax: 1784 case spv::OpAtomicAnd: 1785 case spv::OpAtomicOr: 1786 case spv::OpAtomicXor: 1787 worklist.insert(insn.word(3)); /* ptr */ 1788 break; 1789 case spv::OpStore: 1790 case spv::OpAtomicStore: 1791 worklist.insert(insn.word(1)); /* ptr */ 1792 break; 1793 case spv::OpAccessChain: 1794 case spv::OpInBoundsAccessChain: 1795 worklist.insert(insn.word(3)); /* base ptr */ 1796 break; 1797 case spv::OpSampledImage: 1798 case spv::OpImageSampleImplicitLod: 1799 case spv::OpImageSampleExplicitLod: 1800 case spv::OpImageSampleDrefImplicitLod: 1801 case spv::OpImageSampleDrefExplicitLod: 1802 case spv::OpImageSampleProjImplicitLod: 1803 case spv::OpImageSampleProjExplicitLod: 1804 case spv::OpImageSampleProjDrefImplicitLod: 1805 case spv::OpImageSampleProjDrefExplicitLod: 1806 case spv::OpImageFetch: 1807 case spv::OpImageGather: 1808 case spv::OpImageDrefGather: 1809 case spv::OpImageRead: 1810 case spv::OpImage: 1811 case spv::OpImageQueryFormat: 1812 case spv::OpImageQueryOrder: 1813 case spv::OpImageQuerySizeLod: 1814 case spv::OpImageQuerySize: 1815 case spv::OpImageQueryLod: 1816 case spv::OpImageQueryLevels: 1817 case spv::OpImageQuerySamples: 1818 case spv::OpImageSparseSampleImplicitLod: 1819 case spv::OpImageSparseSampleExplicitLod: 1820 case spv::OpImageSparseSampleDrefImplicitLod: 1821 case spv::OpImageSparseSampleDrefExplicitLod: 1822 case spv::OpImageSparseSampleProjImplicitLod: 1823 case spv::OpImageSparseSampleProjExplicitLod: 1824 case spv::OpImageSparseSampleProjDrefImplicitLod: 1825 case spv::OpImageSparseSampleProjDrefExplicitLod: 1826 case spv::OpImageSparseFetch: 1827 case spv::OpImageSparseGather: 1828 case spv::OpImageSparseDrefGather: 1829 case spv::OpImageTexelPointer: 1830 worklist.insert(insn.word(3)); /* image or sampled image */ 1831 break; 1832 case spv::OpImageWrite: 1833 worklist.insert(insn.word(1)); /* image -- different operand order to above */ 1834 break; 1835 case spv::OpFunctionCall: 1836 for (uint32_t i = 3; i < insn.len(); i++) { 1837 worklist.insert(insn.word(i)); /* fn itself, and all args */ 1838 } 1839 break; 1840 1841 case spv::OpExtInst: 1842 for (uint32_t i = 5; i < 
insn.len(); i++) { 1843 worklist.insert(insn.word(i)); /* operands to ext inst */ 1844 } 1845 break; 1846 } 1847 } 1848 break; 1849 } 1850 } 1851} 1852 1853static bool validate_push_constant_block_against_pipeline(layer_data *my_data, 1854 std::vector<VkPushConstantRange> const *pushConstantRanges, 1855 shader_module const *src, spirv_inst_iter type, 1856 VkShaderStageFlagBits stage) { 1857 bool pass = true; 1858 1859 /* strip off ptrs etc */ 1860 type = get_struct_type(src, type, false); 1861 assert(type != src->end()); 1862 1863 /* validate directly off the offsets. this isn't quite correct for arrays 1864 * and matrices, but is a good first step. TODO: arrays, matrices, weird 1865 * sizes */ 1866 for (auto insn : *src) { 1867 if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) { 1868 1869 if (insn.word(3) == spv::DecorationOffset) { 1870 unsigned offset = insn.word(4); 1871 auto size = 4; /* bytes; TODO: calculate this based on the type */ 1872 1873 bool found_range = false; 1874 for (auto const &range : *pushConstantRanges) { 1875 if (range.offset <= offset && range.offset + range.size >= offset + size) { 1876 found_range = true; 1877 1878 if ((range.stageFlags & stage) == 0) { 1879 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1880 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC", 1881 "Push constant range covering variable starting at " 1882 "offset %u not accessible from stage %s", 1883 offset, string_VkShaderStageFlagBits(stage))) { 1884 pass = false; 1885 } 1886 } 1887 1888 break; 1889 } 1890 } 1891 1892 if (!found_range) { 1893 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 1894 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC", 1895 "Push constant range covering variable starting at " 1896 "offset %u not declared in layout", 1897 offset)) { 1898 pass = false; 1899 } 1900 } 1901 } 1902 } 1903 } 1904 1905 return pass; 1906} 1907 1908static bool validate_push_constant_usage(layer_data *my_data, 1909 std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src, 1910 std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) { 1911 bool pass = true; 1912 1913 for (auto id : accessible_ids) { 1914 auto def_insn = src->get_def(id); 1915 if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) { 1916 pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src, 1917 src->get_def(def_insn.word(1)), stage); 1918 } 1919 } 1920 1921 return pass; 1922} 1923 1924// For given pipelineLayout verify that the setLayout at slot.first 1925// has the requested binding at slot.second 1926static VkDescriptorSetLayoutBinding const * get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) { 1927 1928 if (!pipelineLayout) 1929 return nullptr; 1930 1931 if (slot.first >= pipelineLayout->descriptorSetLayouts.size()) 1932 return nullptr; 1933 1934 auto const layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]]; 1935 1936 auto bindingIt = layout_node->bindingToIndexMap.find(slot.second); 1937 if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL)) 1938 return nullptr; 1939 1940 assert(bindingIt->second < layout_node->createInfo.bindingCount); 1941 return &layout_node->createInfo.pBindings[bindingIt->second]; 
1942} 1943 1944// Block of code at start here for managing/tracking Pipeline state that this layer cares about 1945 1946static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0}; 1947 1948// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound 1949// Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates 1950// to that same cmd buffer by separate thread are not changing state from underneath us 1951// Track the last cmd buffer touched by this thread 1952 1953static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) { 1954 for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) { 1955 if (pCB->drawCount[i]) 1956 return true; 1957 } 1958 return false; 1959} 1960 1961// Check object status for selected flag state 1962static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags, 1963 DRAW_STATE_ERROR error_code, const char *fail_msg) { 1964 if (!(pNode->status & status_mask)) { 1965 return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 1966 reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS", 1967 "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg); 1968 } 1969 return false; 1970} 1971 1972// Retrieve pipeline node ptr for given pipeline object 1973static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) { 1974 if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) { 1975 return NULL; 1976 } 1977 return my_data->pipelineMap[pipeline]; 1978} 1979 1980// Return true if for a given PSO, the given state enum is dynamic, else return false 1981static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) { 1982 if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) { 1983 for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { 1984 if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) 1985 return true; 1986 } 1987 } 1988 return false; 1989} 1990 1991// Validate state stored as flags at time of draw call 1992static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) { 1993 bool result; 1994 result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND, 1995 "Dynamic viewport state not set for this command buffer"); 1996 result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND, 1997 "Dynamic scissor state not set for this command buffer"); 1998 if (pPipe->graphicsPipelineCI.pInputAssemblyState && 1999 ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) || 2000 (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) { 2001 result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2002 DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer"); 2003 } 2004 if (pPipe->graphicsPipelineCI.pRasterizationState && 2005 (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) { 2006 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2007 DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not 
set for this command buffer"); 2008 } 2009 if (pPipe->blendConstantsEnabled) { 2010 result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2011 DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer"); 2012 } 2013 if (pPipe->graphicsPipelineCI.pDepthStencilState && 2014 (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) { 2015 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2016 DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer"); 2017 } 2018 if (pPipe->graphicsPipelineCI.pDepthStencilState && 2019 (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) { 2020 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2021 DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer"); 2022 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2023 DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer"); 2024 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2025 DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer"); 2026 } 2027 if (indexedDraw) { 2028 result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2029 DRAWSTATE_INDEX_BUFFER_NOT_BOUND, 2030 "Index buffer object not bound to this command buffer when Indexed Draw attempted"); 2031 } 2032 return result; 2033} 2034 2035// Verify attachment reference compatibility according to spec 2036// If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this 2037// If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions 2038// to make sure that format and sample counts match. 2039// If not, they are not compatible.
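//
// Illustrative sketch (not part of the layer; the description and reference below are
// made up): compatibility depends only on format and sample count, e.g.
//     VkAttachmentDescription atts[1] = {};
//     atts[0].format = VK_FORMAT_B8G8R8A8_UNORM;
//     atts[0].samples = VK_SAMPLE_COUNT_1_BIT;
//     VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
// attachment_references_compatible(0, &ref, 1, atts, &ref, 1, atts) returns true here;
// changing one side's samples to VK_SAMPLE_COUNT_4_BIT would make it return false.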
2040static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary, 2041 const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments, 2042 const VkAttachmentReference *pSecondary, const uint32_t secondaryCount, 2043 const VkAttachmentDescription *pSecondaryAttachments) { 2044 if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED 2045 if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) 2046 return true; 2047 } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED 2048 if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment) 2049 return true; 2050 } else { // format and sample count must match 2051 if ((pPrimaryAttachments[pPrimary[index].attachment].format == 2052 pSecondaryAttachments[pSecondary[index].attachment].format) && 2053 (pPrimaryAttachments[pPrimary[index].attachment].samples == 2054 pSecondaryAttachments[pSecondary[index].attachment].samples)) 2055 return true; 2056 } 2057 // Format and sample counts didn't match 2058 return false; 2059} 2060 2061// For given primary and secondary RenderPass objects, verify that they're compatible 2062static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP, 2063 string &errorMsg) { 2064 if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) { 2065 stringstream errorStr; 2066 errorStr << "invalid VkRenderPass (" << primaryRP << ")"; 2067 errorMsg = errorStr.str(); 2068 return false; 2069 } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) { 2070 stringstream errorStr; 2071 errorStr << "invalid VkRenderPass (" << secondaryRP << ")"; 2072 errorMsg = errorStr.str(); 2073 return false; 2074 } 2075 // Trivial pass case is exact same RP 2076 if (primaryRP == secondaryRP) { 2077 return true; 2078 } 2079 const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo; 2080 const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo; 2081 if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) { 2082 stringstream errorStr; 2083 errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount 2084 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses."; 2085 errorMsg = errorStr.str(); 2086 return false; 2087 } 2088 uint32_t spIndex = 0; 2089 for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) { 2090 // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible 2091 uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount; 2092 uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount; 2093 uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount); 2094 for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) { 2095 if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount, 2096 primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments, 2097 secondaryColorCount, secondaryRPCI->pAttachments)) { 2098 stringstream errorStr; 2099 errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible."; 2100 errorMsg = errorStr.str(); 2101 return false; 2102 } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments, 2103 primaryColorCount, primaryRPCI->pAttachments, 2104 secondaryRPCI->pSubpasses[spIndex].pResolveAttachments, 2105 secondaryColorCount, secondaryRPCI->pAttachments)) { 2106 stringstream errorStr; 2107 errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible."; 2108 errorMsg = errorStr.str(); 2109 return false; 2110 } 2111 } 2112
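// Each side has at most one depth/stencil reference per subpass, so compare the two
// references once, at index 0, with both counts fixed at 1.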
2113 if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 2114 1, primaryRPCI->pAttachments, 2115 secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 2116 1, secondaryRPCI->pAttachments)) { 2117 stringstream errorStr; 2118 errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible."; 2119 errorMsg = errorStr.str(); 2120 return false; 2121 } 2122 2123 uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount; 2124 uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount; 2125 uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount); 2126 for (uint32_t i = 0; i < inputMax; ++i) { 2127 if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount, 2128 primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments, 2129 secondaryInputCount, secondaryRPCI->pAttachments)) { 2130 stringstream errorStr; 2131 errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible."; 2132 errorMsg = errorStr.str(); 2133 return false; 2134 } 2135 } 2136 } 2137 return true; 2138} 2139 2140// For given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex] 2141static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout, 2142 const uint32_t layoutIndex, string &errorMsg) { 2143 auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout); 2144 if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) { 2145 stringstream errorStr; 2146 errorStr << "invalid VkPipelineLayout (" << layout << ")"; 2147 errorMsg = errorStr.str(); 2148 return false; 2149 } 2150 if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) { 2151 stringstream errorStr; 2152 errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size() 2153 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1 2154 << ", but you're attempting to bind set to index " << layoutIndex; 2155 errorMsg = errorStr.str(); 2156 return false; 2157 } 2158 // Get the specific setLayout from PipelineLayout that overlaps this set 2159 LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]]; 2160 if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case 2161 return true; 2162 } 2163 size_t descriptorCount = pLayoutNode->descriptorTypes.size(); 2164 if (descriptorCount != pSet->pLayout->descriptorTypes.size()) { 2165 stringstream errorStr; 2166 errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount 2167 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size() 2168 << " descriptors."; 2169 errorMsg = errorStr.str(); 2170 return false; // trivial fail case 2171 } 2172 // Now need to check set against corresponding pipelineLayout to verify compatibility
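// For example (hypothetical layouts): a bound set whose flattened descriptors are
// {0: UNIFORM_BUFFER for VERTEX, 1: COMBINED_IMAGE_SAMPLER for FRAGMENT} only passes
// against a pipeline setLayout declaring the same type and stageFlags at both indices.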
2173 for (size_t i = 0; i < descriptorCount; ++i) { 2174 // Need to verify that layouts are identically defined 2175 // TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor 2176 // do we also need to check immutable samplers? 2177 if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) { 2178 stringstream errorStr; 2179 errorStr << "descriptor " << i << " for descriptorSet being bound is type '" 2180 << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i]) 2181 << "' but corresponding descriptor from pipelineLayout is type '" 2182 << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'"; 2183 errorMsg = errorStr.str(); 2184 return false; 2185 } 2186 if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) { 2187 stringstream errorStr; 2188 errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i] 2189 << " but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i]; 2190 errorMsg = errorStr.str(); 2191 return false; 2192 } 2193 } 2194 return true; 2195} 2196 2197// Validate that data for each specialization entry is fully contained within the buffer. 2198static bool validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) { 2199 bool pass = true; 2200 2201 VkSpecializationInfo const *spec = info->pSpecializationInfo; 2202 2203 if (spec) { 2204 for (auto i = 0u; i < spec->mapEntryCount; i++) { 2205 if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) { 2206 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 2207 /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC", 2208 "Specialization entry %u (for constant id %u) references memory outside provided " 2209 "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER 2210 " bytes provided)", 2211 i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset, 2212 spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) { 2213 2214 pass = false; 2215 } 2216 } 2217 } 2218 } 2219 2220 return pass; 2221} 2222 2223static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id, 2224 VkDescriptorType descriptor_type, unsigned &descriptor_count) { 2225 auto type = module->get_def(type_id); 2226 2227 descriptor_count = 1; 2228 2229 /* Strip off any array or ptrs. Where we remove array levels, adjust the 2230 * descriptor count for each dimension.
*/ 2231 while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) { 2232 if (type.opcode() == spv::OpTypeArray) { 2233 descriptor_count *= get_constant_value(module, type.word(3)); 2234 type = module->get_def(type.word(2)); 2235 } 2236 else { 2237 type = module->get_def(type.word(3)); 2238 } 2239 } 2240 2241 switch (type.opcode()) { 2242 case spv::OpTypeStruct: { 2243 for (auto insn : *module) { 2244 if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) { 2245 if (insn.word(2) == spv::DecorationBlock) { 2246 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || 2247 descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; 2248 } else if (insn.word(2) == spv::DecorationBufferBlock) { 2249 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || 2250 descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; 2251 } 2252 } 2253 } 2254 2255 /* Invalid */ 2256 return false; 2257 } 2258 2259 case spv::OpTypeSampler: 2260 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER; 2261 2262 case spv::OpTypeSampledImage: 2263 if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) { 2264 /* Slight relaxation for some GLSL historical madness: samplerBuffer 2265 * doesn't really have a sampler, and a texel buffer descriptor 2266 * doesn't really provide one. Allow this slight mismatch. 2267 */ 2268 auto image_type = module->get_def(type.word(2)); 2269 auto dim = image_type.word(3); 2270 auto sampled = image_type.word(7); 2271 return dim == spv::DimBuffer && sampled == 1; 2272 } 2273 return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; 2274 2275 case spv::OpTypeImage: { 2276 /* Many descriptor types backing image types-- depends on dimension 2277 * and whether the image will be used with a sampler. SPIRV for 2278 * Vulkan requires that sampled be 1 or 2 -- leaving the decision to 2279 * runtime is unacceptable. 2280 */ 2281 auto dim = type.word(3); 2282 auto sampled = type.word(7); 2283 2284 if (dim == spv::DimSubpassData) { 2285 return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; 2286 } else if (dim == spv::DimBuffer) { 2287 if (sampled == 1) { 2288 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; 2289 } else { 2290 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; 2291 } 2292 } else if (sampled == 1) { 2293 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; 2294 } else { 2295 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; 2296 } 2297 } 2298 2299 /* We shouldn't really see any other junk types -- but if we do, they're 2300 * a mismatch. 
2301 */ 2302 default: 2303 return false; /* Mismatch */ 2304 } 2305} 2306 2307static bool require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) { 2308 if (!feature) { 2309 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2310 __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC", 2311 "Shader requires VkPhysicalDeviceFeatures::%s but is not " 2312 "enabled on the device", 2313 feature_name)) { 2314 return false; 2315 } 2316 } 2317 2318 return true; 2319} 2320 2321static bool validate_shader_capabilities(layer_data *my_data, shader_module const *src) { 2322 bool pass = true; 2323 2324 auto enabledFeatures = &my_data->phys_dev_properties.features; 2325 2326 for (auto insn : *src) { 2327 if (insn.opcode() == spv::OpCapability) { 2328 switch (insn.word(1)) { 2329 case spv::CapabilityMatrix: 2330 case spv::CapabilityShader: 2331 case spv::CapabilityInputAttachment: 2332 case spv::CapabilitySampled1D: 2333 case spv::CapabilityImage1D: 2334 case spv::CapabilitySampledBuffer: 2335 case spv::CapabilityImageBuffer: 2336 case spv::CapabilityImageQuery: 2337 case spv::CapabilityDerivativeControl: 2338 // Always supported by a Vulkan 1.0 implementation -- no feature bits. 2339 break; 2340 2341 case spv::CapabilityGeometry: 2342 pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader"); 2343 break; 2344 2345 case spv::CapabilityTessellation: 2346 pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader"); 2347 break; 2348 2349 case spv::CapabilityFloat64: 2350 pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64"); 2351 break; 2352 2353 case spv::CapabilityInt64: 2354 pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64"); 2355 break; 2356 2357 case spv::CapabilityTessellationPointSize: 2358 case spv::CapabilityGeometryPointSize: 2359 pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize, 2360 "shaderTessellationAndGeometryPointSize"); 2361 break; 2362 2363 case spv::CapabilityImageGatherExtended: 2364 pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended"); 2365 break; 2366 2367 case spv::CapabilityStorageImageMultisample: 2368 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample"); 2369 break; 2370 2371 case spv::CapabilityUniformBufferArrayDynamicIndexing: 2372 pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing, 2373 "shaderUniformBufferArrayDynamicIndexing"); 2374 break; 2375 2376 case spv::CapabilitySampledImageArrayDynamicIndexing: 2377 pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing, 2378 "shaderSampledImageArrayDynamicIndexing"); 2379 break; 2380 2381 case spv::CapabilityStorageBufferArrayDynamicIndexing: 2382 pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing, 2383 "shaderStorageBufferArrayDynamicIndexing"); 2384 break; 2385 2386 case spv::CapabilityStorageImageArrayDynamicIndexing: 2387 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing, 2388 "shaderStorageImageArrayDynamicIndexing"); 2389 break; 2390 2391 case spv::CapabilityClipDistance: 2392 pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance"); 2393 break; 2394 2395 case spv::CapabilityCullDistance: 2396 pass &= 
require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance"); 2397 break; 2398 2399 case spv::CapabilityImageCubeArray: 2400 pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray"); 2401 break; 2402 2403 case spv::CapabilitySampleRateShading: 2404 pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading"); 2405 break; 2406 2407 case spv::CapabilitySparseResidency: 2408 pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency"); 2409 break; 2410 2411 case spv::CapabilityMinLod: 2412 pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod"); 2413 break; 2414 2415 case spv::CapabilitySampledCubeArray: 2416 pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray"); 2417 break; 2418 2419 case spv::CapabilityImageMSArray: 2420 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample"); 2421 break; 2422 2423 case spv::CapabilityStorageImageExtendedFormats: 2424 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats, 2425 "shaderStorageImageExtendedFormats"); 2426 break; 2427 2428 case spv::CapabilityInterpolationFunction: 2429 pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading"); 2430 break; 2431 2432 case spv::CapabilityStorageImageReadWithoutFormat: 2433 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat, 2434 "shaderStorageImageReadWithoutFormat"); 2435 break; 2436 2437 case spv::CapabilityStorageImageWriteWithoutFormat: 2438 pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat, 2439 "shaderStorageImageWriteWithoutFormat"); 2440 break; 2441 2442 case spv::CapabilityMultiViewport: 2443 pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport"); 2444 break; 2445 2446 default: 2447 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2448 __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC", 2449 "Shader declares capability %u, not supported in Vulkan.", 2450 insn.word(1))) 2451 pass = false; 2452 break; 2453 } 2454 } 2455 } 2456 2457 return pass; 2458} 2459 2460static bool validate_pipeline_shader_stage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage, 2461 PIPELINE_NODE *pipeline, PIPELINE_LAYOUT_NODE *pipelineLayout, 2462 shader_module **out_module, spirv_inst_iter *out_entrypoint) { 2463 bool pass = true; 2464 auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get(); 2465 pass &= validate_specialization_offsets(dev_data, pStage); 2466 2467 /* find the entrypoint */ 2468 auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage); 2469 if (entrypoint == module->end()) { 2470 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2471 __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC", 2472 "No entrypoint found named `%s` for stage %s", pStage->pName, 2473 string_VkShaderStageFlagBits(pStage->stage))) { 2474 pass = false; 2475 } 2476 } 2477 2478 /* validate shader capabilities against enabled device features */ 2479 pass &= validate_shader_capabilities(dev_data, module); 2480 2481 /* mark accessible ids */ 2482 std::unordered_set<uint32_t> accessible_ids; 2483 mark_accessible_ids(module, entrypoint, accessible_ids); 2484 2485 /* validate descriptor set 
layout against what the entrypoint actually uses */ 2486 std::map<descriptor_slot_t, interface_var> descriptor_uses; 2487 collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses); 2488 2489 /* validate push constant usage */ 2490 pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges, 2491 module, accessible_ids, pStage->stage); 2492 2493 /* validate descriptor use */ 2494 for (auto use : descriptor_uses) { 2495 // While validating shaders capture which slots are used by the pipeline 2496 pipeline->active_slots[use.first.first].insert(use.first.second); 2497 2498 /* find the matching binding */ 2499 auto binding = get_descriptor_binding(dev_data, pipelineLayout, use.first); 2500 unsigned required_descriptor_count; 2501 2502 if (!binding) { 2503 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2504 __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC", 2505 "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout", 2506 use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) { 2507 pass = false; 2508 } 2509 } else if (~binding->stageFlags & pStage->stage) { 2510 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 2511 /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC", 2512 "Shader uses descriptor slot %u.%u (used " 2513 "as type `%s`) but descriptor not " 2514 "accessible from stage %s", 2515 use.first.first, use.first.second, 2516 describe_type(module, use.second.type_id).c_str(), 2517 string_VkShaderStageFlagBits(pStage->stage))) { 2518 pass = false; 2519 } 2520 } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) { 2521 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2522 __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", 2523 "Type mismatch on descriptor slot " 2524 "%u.%u (used as type `%s`) but " 2525 "descriptor of type %s", 2526 use.first.first, use.first.second, 2527 describe_type(module, use.second.type_id).c_str(), 2528 string_VkDescriptorType(binding->descriptorType))) { 2529 pass = false; 2530 } 2531 } else if (binding->descriptorCount < required_descriptor_count) { 2532 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 2533 __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", 2534 "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided", 2535 required_descriptor_count, use.first.first, use.first.second, 2536 describe_type(module, use.second.type_id).c_str(), 2537 binding->descriptorCount)) { 2538 pass = false; 2539 } 2540 } 2541 } 2542 2543 return pass; 2544} 2545 2546 2547// Validate the shaders used by the given pipeline and store the active_slots 2548// that are actually used by the pipeline into pPipeline->active_slots 2549static bool validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) { 2550 auto pCreateInfo = reinterpret_cast<VkGraphicsPipelineCreateInfo const *>(&pPipeline->graphicsPipelineCI); 2551 int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT); 2552 int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT); 2553 2554 shader_module *shaders[5]; 2555 memset(shaders, 0, sizeof(shaders)); 2556 spirv_inst_iter
entrypoints[5]; 2557 memset(entrypoints, 0, sizeof(entrypoints)); 2558 VkPipelineVertexInputStateCreateInfo const *vi = 0; 2559 bool pass = true; 2560 2561 auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr; 2562 2563 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { 2564 VkPipelineShaderStageCreateInfo const *pStage = 2565 reinterpret_cast<VkPipelineShaderStageCreateInfo const *>(&pCreateInfo->pStages[i]); 2566 auto stage_id = get_shader_stage_id(pStage->stage); 2567 pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout, 2568 &shaders[stage_id], &entrypoints[stage_id]); 2569 } 2570 2571 vi = pCreateInfo->pVertexInputState; 2572 2573 if (vi) { 2574 pass &= validate_vi_consistency(my_data, vi); 2575 } 2576 2577 if (shaders[vertex_stage]) { 2578 pass &= validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]); 2579 } 2580 2581 int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT); 2582 int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); 2583 2584 while (!shaders[producer] && producer != fragment_stage) { 2585 producer++; 2586 consumer++; 2587 } 2588 2589 for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) { 2590 assert(shaders[producer]); 2591 if (shaders[consumer]) { 2592 pass &= validate_interface_between_stages(my_data, 2593 shaders[producer], entrypoints[producer], &shader_stage_attribs[producer], 2594 shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]); 2595 2596 producer = consumer; 2597 } 2598 } 2599 2600 auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr; 2601 2602 if (shaders[fragment_stage] && rp) { 2603 pass &= validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp, 2604 pCreateInfo->subpass); 2605 } 2606 2607 return pass; 2608} 2609 2610static bool validate_compute_pipeline(layer_data *my_data, PIPELINE_NODE *pPipeline) { 2611 auto pCreateInfo = reinterpret_cast<VkComputePipelineCreateInfo const *>(&pPipeline->computePipelineCI); 2612 2613 auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? 
&my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr; 2614 2615 shader_module *module; 2616 spirv_inst_iter entrypoint; 2617 2618 return validate_pipeline_shader_stage(my_data, &pCreateInfo->stage, pPipeline, pipelineLayout, 2619 &module, &entrypoint); 2620} 2621 2622// Return Set node ptr for specified set or else NULL 2623static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) { 2624 if (my_data->setMap.find(set) == my_data->setMap.end()) { 2625 return NULL; 2626 } 2627 return my_data->setMap[set]; 2628} 2629 2630// For given Layout Node and binding, return index where that binding begins 2631static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) { 2632 uint32_t offsetIndex = 0; 2633 for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) { 2634 if (pLayout->createInfo.pBindings[i].binding == binding) 2635 break; 2636 offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount; 2637 } 2638 return offsetIndex; 2639} 2640 2641// For given layout node and binding, return last index that is updated 2642static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) { 2643 uint32_t offsetIndex = 0; 2644 for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) { 2645 offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount; 2646 if (pLayout->createInfo.pBindings[i].binding == binding) 2647 break; 2648 } 2649 return offsetIndex - 1; 2650} 2651 2652// For the given command buffer, verify and update the state for activeSetBindingsPairs 2653// This includes: 2654// 1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound. 2655// To be valid, the dynamic offset combined with the offset and range from its 2656// descriptor update must not overflow the size of its buffer being updated 2657// 2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images 2658// 3. 
Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers 2659static bool validate_and_update_drawtime_descriptor_state( 2660 layer_data *dev_data, GLOBAL_CB_NODE *pCB, 2661 const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) { 2662 bool result = false; 2663 2664 VkWriteDescriptorSet *pWDS = NULL; 2665 uint32_t dynOffsetIndex = 0; 2666 VkDeviceSize bufferSize = 0; 2667 for (auto set_bindings_pair : activeSetBindingsPairs) { 2668 SET_NODE *set_node = set_bindings_pair.first; 2669 LAYOUT_NODE *layout_node = set_node->pLayout; 2670 for (auto binding : set_bindings_pair.second) { 2671 auto binding_index = layout_node->bindingToIndexMap[binding]; 2672 if ((set_node->pLayout->createInfo.pBindings[binding_index].descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) && 2673 (set_node->pLayout->createInfo.pBindings[binding_index].descriptorCount != 0) && 2674 (set_node->pLayout->createInfo.pBindings[binding_index].pImmutableSamplers)) { 2675 // No work for immutable sampler binding 2676 } else { 2677 uint32_t startIdx = getBindingStartIndex(layout_node, binding); 2678 uint32_t endIdx = getBindingEndIndex(layout_node, binding); 2679 for (uint32_t i = startIdx; i <= endIdx; ++i) { 2680 // We did check earlier to verify that set was updated, but now make sure given slot was updated 2681 // TODO : Would be better to store set# that set is bound to so we can report set.binding[index] not updated 2682 // For immutable sampler w/o combined image, don't need to update 2683 if (!set_node->pDescriptorUpdates[i]) { 2684 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2685 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2686 DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS", 2687 "DS %#" PRIxLEAST64 " bound and active but it never had binding %u updated. It is now being used to draw so " 2688 "this will result in undefined behavior.", 2689 reinterpret_cast<const uint64_t &>(set_node->set), binding); 2690 } else { 2691 switch (set_node->pDescriptorUpdates[i]->sType) { 2692 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 2693 pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i]; 2694 2695 // Verify uniform and storage buffers actually are bound to valid memory at draw time. 2696 if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || 2697 (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || 2698 (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) || 2699 (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { 2700 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2701 auto buffer_node = dev_data->bufferMap.find(pWDS->pBufferInfo[j].buffer); 2702 if (buffer_node == dev_data->bufferMap.end()) { 2703 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2704 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2705 reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2706 DRAWSTATE_INVALID_BUFFER, "DS", 2707 "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index #%u" 2708 " is not defined! 
Has vkCreateBuffer been called?", 2709 reinterpret_cast<const uint64_t &>(set_node->set), 2710 string_VkDescriptorType(pWDS->descriptorType), 2711 reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i); 2712 } else { 2713 auto mem_entry = dev_data->memObjMap.find(buffer_node->second.mem); 2714 if (mem_entry == dev_data->memObjMap.end()) { 2715 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2716 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2717 reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2718 DRAWSTATE_INVALID_BUFFER, "DS", 2719 "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index" 2720 " #%u has no memory bound to it!", 2721 reinterpret_cast<const uint64_t &>(set_node->set), 2722 string_VkDescriptorType(pWDS->descriptorType), 2723 reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i); 2724 } 2725 } 2726 // If it's a dynamic buffer, make sure the offsets are within the buffer. For example, a 256-byte buffer updated at offset 64 overflows once the dynamic offset exceeds 192 (64 + 193 > 256). 2727 if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || 2728 (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { 2729 bufferSize = dev_data->bufferMap[pWDS->pBufferInfo[j].buffer].createInfo.size; 2730 uint32_t dynOffset = 2731 pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex]; 2732 if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) { 2733 if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) { 2734 result |= log_msg( 2735 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2736 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2737 reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2738 DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS", 2739 "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of " 2740 "VK_WHOLE_SIZE but dynamic offset %#" PRIxLEAST32 " " 2741 "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64 2742 ") which has a size of %#" PRIxLEAST64 ".", 2743 reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset, 2744 pWDS->pBufferInfo[j].offset, 2745 reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize); 2746 } 2747 } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > 2748 bufferSize) { 2749 result |= 2750 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2751 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2752 reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, 2753 DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS", 2754 "VkDescriptorSet (%#" PRIxLEAST64 2755 ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". 
" 2756 "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64 2757 " from its update, this oversteps its buffer " 2758 "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".", 2759 reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset, 2760 pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range, 2761 reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize); 2762 } 2763 dynOffsetIndex++; 2764 } 2765 } 2766 } 2767 if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { 2768 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2769 pCB->updateImages.insert(pWDS->pImageInfo[j].imageView); 2770 } 2771 } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) { 2772 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2773 assert(dev_data->bufferViewMap.find(pWDS->pTexelBufferView[j]) != dev_data->bufferViewMap.end()); 2774 pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer); 2775 } 2776 } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || 2777 pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { 2778 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2779 pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer); 2780 } 2781 } 2782 i += pWDS->descriptorCount; // Advance i to end of this set of descriptors (++i at end of for loop will move 1 2783 // index past last of these descriptors) 2784 break; 2785 default: // Currently only shadowing Write update nodes so shouldn't get here 2786 assert(0); 2787 continue; 2788 } 2789 } 2790 } 2791 } 2792 } 2793 } 2794 return result; 2795} 2796// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound. 2797// When validate_and_update_draw_state() handles computer shaders so that active_slots is correct for compute pipelines, this 2798// function can be killed and validate_and_update_draw_state() used instead 2799static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 2800 VkWriteDescriptorSet *pWDS = nullptr; 2801 SET_NODE *pSet = nullptr; 2802 // For the bound descriptor sets, pull off any storage images and buffers 2803 // This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute 2804 // pipelines 2805 for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) { 2806 // Get the set node 2807 pSet = getSetNode(dev_data, set); 2808 // For each update in the set 2809 for (auto pUpdate : pSet->pDescriptorUpdates) { 2810 // If it's a write update to STORAGE type capture image/buffer being updated 2811 if (pUpdate && (pUpdate->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)) { 2812 pWDS = reinterpret_cast<VkWriteDescriptorSet *>(pUpdate); 2813 if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) { 2814 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2815 pCB->updateImages.insert(pWDS->pImageInfo[j].imageView); 2816 } 2817 } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) { 2818 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2819 pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer); 2820 } 2821 } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || 2822 pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { 2823 for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) { 2824 
pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer); 2825 } 2826 } 2827 } 2828 } 2829 } 2830} 2831 2832// Validate overall state at the time of a draw call 2833static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw, 2834 const VkPipelineBindPoint bindPoint) { 2835 bool result = false; 2836 auto const &state = pCB->lastBound[bindPoint]; 2837 PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline); 2838 // First check flag states 2839 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) 2840 result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw); 2841 2842 // Now complete other state checks 2843 // TODO : Currently only performing next check if *something* was bound (non-zero last bound) 2844 // There is probably a better way to gate when this check happens, and to know if something *should* have been bound 2845 // We should have that check separately and then gate this check based on that check 2846 if (pPipe) { 2847 if (state.pipelineLayout) { 2848 string errorString; 2849 // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets 2850 vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs; 2851 for (auto setBindingPair : pPipe->active_slots) { 2852 uint32_t setIndex = setBindingPair.first; 2853 // If valid set is not bound throw an error 2854 if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) { 2855 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2856 __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS", 2857 "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.", 2858 (uint64_t)pPipe->pipeline, setIndex); 2859 } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]], 2860 pPipe->graphicsPipelineCI.layout, setIndex, errorString)) { 2861 // Set is bound but not compatible w/ overlapping pipelineLayout from PSO 2862 VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set; 2863 result |= log_msg( 2864 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 2865 (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS", 2866 "VkDescriptorSet (%#" PRIxLEAST64 2867 ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s", 2868 (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str()); 2869 } else { // Valid set is bound and layout compatible, validate that it's updated 2870 // Pull the set node 2871 SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]]; 2872 // Save vector of all active sets to verify dynamicOffsets below 2873 activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second)); 2874 // Make sure set has been updated if it has no immutable samplers 2875 // If it has immutable samplers, we'll flag error later as needed depending on binding 2876 if (!pSet->pUpdateStructs && !pSet->pLayout->immutableSamplerCount) { 2877 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 2878 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__, 2879 DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS", 2880 "DS %#" PRIxLEAST64 " bound but it was never updated. 
It is now being used to draw so " 2881 "this will result in undefined behavior.", 2882 (uint64_t)pSet->set); 2883 } 2884 } 2885 } 2886 // For given active slots, verify any dynamic descriptors and record updated images & buffers 2887 result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs); 2888 } 2889 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) { 2890 // Verify Vtx binding 2891 if (pPipe->vertexBindingDescriptions.size() > 0) { 2892 for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) { 2893 if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) { 2894 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2895 __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS", 2896 "The Pipeline State Object (%#" PRIxLEAST64 2897 ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER 2898 " should be set via vkCmdBindVertexBuffers.", 2899 (uint64_t)state.pipeline, i); 2900 } 2901 } 2902 } else { 2903 if (!pCB->currentDrawData.buffers.empty()) { 2904 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 2905 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS", 2906 "Vertex buffers are bound to command buffer (%#" PRIxLEAST64 2907 ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").", 2908 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline); 2909 } 2910 } 2911 // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count. 2912 // Skip check if rasterization is disabled or there is no viewport. 2913 if ((!pPipe->graphicsPipelineCI.pRasterizationState || 2914 (pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) && 2915 pPipe->graphicsPipelineCI.pViewportState) { 2916 bool dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT); 2917 bool dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR); 2918 if (dynViewport) { 2919 if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) { 2920 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2921 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 2922 "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER 2923 ", but PSO viewportCount is %u. These counts must match.", 2924 pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount); 2925 } 2926 } 2927 if (dynScissor) { 2928 if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) { 2929 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 2930 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 2931 "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER 2932 ", but PSO scissorCount is %u. These counts must match.", 2933 pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount); 2934 } 2935 } 2936 } 2937 } 2938 } 2939 return result; 2940} 2941 2942// Validate HW line width capabilities prior to setting requested line width. 2943static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) { 2944 bool skip_call = false; 2945 2946 // First check to see if the physical device supports wide lines. 
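// For context, a minimal sketch (not part of this layer) of how an application
// legitimately enables wide lines before using a lineWidth other than 1.0f;
// 'physicalDevice' and 'deviceCreateInfo' are illustrative names, not locals here:
//
//     VkPhysicalDeviceFeatures supported = {};
//     vkGetPhysicalDeviceFeatures(physicalDevice, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.wideLines = supported.wideLines; // request the feature at device creation
//     deviceCreateInfo.pEnabledFeatures = &enabled;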
2947 if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) { 2948 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__, 2949 dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature " 2950 "not supported/enabled so lineWidth must be 1.0f!", 2951 lineWidth); 2952 } else { 2953 // Otherwise, make sure the width falls in the valid range. 2954 if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) || 2955 (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) { 2956 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, 2957 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width " 2958 "to between [%f, %f]!", 2959 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0], 2960 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]); 2961 } 2962 } 2963 2964 return skip_call; 2965} 2966 2967// Verify that create state for a pipeline is valid 2968static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines, 2969 int pipelineIndex) { 2970 bool skipCall = false; 2971 2972 PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex]; 2973 2974 // If create derivative bit is set, check that we've specified a base 2975 // pipeline correctly, and that the base pipeline was created to allow 2976 // derivatives. 2977 if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { 2978 PIPELINE_NODE *pBasePipeline = nullptr; 2979 if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^ 2980 (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) { 2981 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 2982 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 2983 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified"); 2984 } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) { 2985 if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) { 2986 skipCall |= 2987 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 2988 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 2989 "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline."); 2990 } else { 2991 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex]; 2992 } 2993 } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { 2994 pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle); 2995 } 2996 2997 if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { 2998 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 2999 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3000 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives."); 3001 } 3002 } 3003 3004 if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) { 3005 if (!my_data->phys_dev_properties.features.independentBlend) { 3006 if (pPipeline->attachments.size() > 1) { 3007 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0]; 3008 for (size_t i = 1; i < 
pPipeline->attachments.size(); i++) { 3009 if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) || 3010 (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) || 3011 (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) || 3012 (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) || 3013 (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) || 3014 (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) || 3015 (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) || 3016 (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) { 3017 skipCall |= 3018 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3019 DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not " 3020 "enabled, all elements of pAttachments must be identical"); 3021 } 3022 } 3023 } 3024 } 3025 if (!my_data->phys_dev_properties.features.logicOp && 3026 (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) { 3027 skipCall |= 3028 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3029 DRAWSTATE_DISABLED_LOGIC_OP, "DS", 3030 "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE"); 3031 } 3032 if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) && 3033 ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) || 3034 (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) { 3035 skipCall |= 3036 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3037 DRAWSTATE_INVALID_LOGIC_OP, "DS", 3038 "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value"); 3039 } 3040 } 3041 3042 // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state 3043 // produces nonsense errors that confuse users. Other layers should already 3044 // emit errors for renderpass being invalid. 
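// Illustrative trigger for the range check below (handles are hypothetical): a render
// pass created with VkRenderPassCreateInfo::subpassCount == 1, paired with
//
//     graphicsPipelineCI.renderPass = renderPassWithOneSubpass;
//     graphicsPipelineCI.subpass = 1; // invalid: valid indices are 0..subpassCount-1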
3045 auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass); 3046 if (rp_data != my_data->renderPassMap.end() && 3047 pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) { 3048 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3049 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u " 3050 "is out of range for this renderpass (0..%u)", 3051 pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1); 3052 } 3053 3054 if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) { 3055 skipCall = true; 3056 } 3057 // Each shader's stage must be unique 3058 if (pPipeline->duplicate_shaders) { 3059 for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) { 3060 if (pPipeline->duplicate_shaders & stage) { 3061 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, 3062 __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3063 "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s", 3064 string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage))); 3065 } 3066 } 3067 } 3068 // VS is required 3069 if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) { 3070 skipCall |= 3071 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3072 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required"); 3073 } 3074 // Either both or neither TC/TE shaders should be defined 3075 if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) != 3076 ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) { 3077 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3078 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3079 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair"); 3080 } 3081 // Compute shaders should be specified independent of Gfx shaders 3082 if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) && 3083 (pPipeline->active_shaders & 3084 (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | 3085 VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) { 3086 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3087 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3088 "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline"); 3089 } 3090 // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines. 3091 // Mismatching primitive topology and tessellation fails graphics pipeline creation. 
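// For reference, a consistent input-assembly/tessellation sketch that satisfies the
// checks below when TC and TE stages are both present:
//
//     VkPipelineInputAssemblyStateCreateInfo ia = {};
//     ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
//     ia.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; // required with tessellation stages
//     VkPipelineTessellationStateCreateInfo ts = {};
//     ts.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
//     ts.patchControlPoints = 3; // must be > 0 and <= 32 to pass the check below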
3092 if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) && 3093 (!pPipeline->graphicsPipelineCI.pInputAssemblyState || 3094 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { 3095 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3096 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3097 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA " 3098 "topology for tessellation pipelines"); 3099 } 3100 if (pPipeline->graphicsPipelineCI.pInputAssemblyState && 3101 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { 3102 if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) { 3103 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3104 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3105 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3106 "topology is only valid for tessellation pipelines"); 3107 } 3108 if (!pPipeline->graphicsPipelineCI.pTessellationState) { 3109 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3110 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", 3111 "Invalid Pipeline CreateInfo State: " 3112 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3113 "topology used. pTessellationState must not be NULL in this case."); 3114 } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints || 3115 (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) { 3116 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3117 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: " 3118 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive " 3119 "topology used with patchControlPoints value %u." 3120 " patchControlPoints should be >0 and <=32.", 3121 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints); 3122 } 3123 } 3124 // If a rasterization state is provided, make sure that the line width conforms to the HW. 3125 if (pPipeline->graphicsPipelineCI.pRasterizationState) { 3126 if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) { 3127 skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline), 3128 pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth); 3129 } 3130 } 3131 // Viewport state must be included if rasterization is enabled. 3132 // If the viewport state is included, the viewport and scissor counts should always match. 3133 // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler 3134 if (!pPipeline->graphicsPipelineCI.pRasterizationState || 3135 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { 3136 if (!pPipeline->graphicsPipelineCI.pViewportState) { 3137 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3138 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. 
Even if viewport " 3139 "and scissors are dynamic PSO must include " 3140 "viewportCount and scissorCount in pViewportState."); 3141 } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount != 3142 pPipeline->graphicsPipelineCI.pViewportState->viewportCount) { 3143 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3144 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3145 "Gfx Pipeline viewport count (%u) must match scissor count (%u).", 3146 pPipeline->graphicsPipelineCI.pViewportState->viewportCount, 3147 pPipeline->graphicsPipelineCI.pViewportState->scissorCount); 3148 } else { 3149 // If viewport or scissor are not dynamic, then verify that data is appropriate for count 3150 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT); 3151 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR); 3152 if (!dynViewport) { 3153 if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount && 3154 !pPipeline->graphicsPipelineCI.pViewportState->pViewports) { 3155 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 3156 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3157 "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you " 3158 "must either include pViewports data, or include viewport in pDynamicState and set it with " 3159 "vkCmdSetViewport().", 3160 pPipeline->graphicsPipelineCI.pViewportState->viewportCount); 3161 } 3162 } 3163 if (!dynScissor) { 3164 if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount && 3165 !pPipeline->graphicsPipelineCI.pViewportState->pScissors) { 3166 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 3167 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", 3168 "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you " 3169 "must either include pScissors data, or include scissor in pDynamicState and set it with " 3170 "vkCmdSetScissor().", 3171 pPipeline->graphicsPipelineCI.pViewportState->scissorCount); 3172 } 3173 } 3174 } 3175 } 3176 return skipCall; 3177} 3178 3179// Free the Pipeline nodes 3180static void deletePipelines(layer_data *my_data) { 3181 if (my_data->pipelineMap.size() <= 0) 3182 return; 3183 for (auto &pipe_map_pair : my_data->pipelineMap) { 3184 delete pipe_map_pair.second; 3185 } 3186 my_data->pipelineMap.clear(); 3187} 3188 3189// For given pipeline, return number of MSAA samples, or one if MSAA disabled 3190static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) { 3191 PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline]; 3192 if (pPipe->graphicsPipelineCI.pMultisampleState && 3193 (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->graphicsPipelineCI.pMultisampleState->sType)) { 3194 return pPipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples; 3195 } 3196 return VK_SAMPLE_COUNT_1_BIT; 3197} 3198 3199// Validate state related to the PSO 3200static bool validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint, 3201 const VkPipeline pipeline) { 3202 bool skipCall = false; 3203 if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) { 3204 // Verify that any MSAA request in PSO matches sample# in bound FB 3205 // Skip the check if rasterization is disabled. 
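// Example of the invariant enforced below: a pipeline whose
// pMultisampleState->rasterizationSamples is VK_SAMPLE_COUNT_4_BIT may only be used
// while the active subpass's (used) color and depth/stencil attachments were all
// created with samples == VK_SAMPLE_COUNT_4_BIT; any disagreement is reported as
// DRAWSTATE_NUM_SAMPLES_MISMATCH.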
3206 PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
3207 if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3208 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3209 VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
3210 if (pCB->activeRenderPass) {
3211 const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
3212 const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
3213 VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3214 uint32_t i;
3215
3216 const VkPipelineColorBlendStateCreateInfo *pColorBlendState = pPipeline->graphicsPipelineCI.pColorBlendState;
3217 if ((pColorBlendState != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
3218 (pColorBlendState->attachmentCount != pSD->colorAttachmentCount)) {
3219 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3220 reinterpret_cast<const uint64_t &>(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3221 "Render pass subpass %u mismatch: pipeline blend state defines %u attachments, but the "
3222 "subpass has %u color attachments! These counts must be the same.",
3223 pCB->activeSubpass, pColorBlendState->attachmentCount, pSD->colorAttachmentCount);
3224 }
3225
3226 for (i = 0; i < pSD->colorAttachmentCount; i++) {
3227 VkSampleCountFlagBits samples;
3228
3229 if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3230 continue;
3231
3232 samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3233 if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3234 subpassNumSamples = samples;
3235 } else if (subpassNumSamples != samples) {
3236 subpassNumSamples = (VkSampleCountFlagBits)-1;
3237 break;
3238 }
3239 }
3240 if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3241 const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3242 if (subpassNumSamples == (VkSampleCountFlagBits)0)
3243 subpassNumSamples = samples;
3244 else if (subpassNumSamples != samples)
3245 subpassNumSamples = (VkSampleCountFlagBits)-1;
3246 }
3247
3248 if ((pSD->colorAttachmentCount > 0 || pSD->pDepthStencilAttachment) &&
3249 psoNumSamples != subpassNumSamples) {
3250 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3251 (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3252 "Num samples mismatch! 
Binding PSO (%#" PRIxLEAST64 3253 ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!", 3254 (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples); 3255 } 3256 } else { 3257 // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass 3258 // Verify and flag error as appropriate 3259 } 3260 } 3261 // TODO : Add more checks here 3262 } else { 3263 // TODO : Validate non-gfx pipeline updates 3264 } 3265 return skipCall; 3266} 3267 3268// Block of code at start here specifically for managing/tracking DSs 3269 3270// Return Pool node ptr for specified pool or else NULL 3271static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) { 3272 if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) { 3273 return NULL; 3274 } 3275 return my_data->descriptorPoolMap[pool]; 3276} 3277 3278static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) { 3279 if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) { 3280 return NULL; 3281 } 3282 return my_data->descriptorSetLayoutMap[layout]; 3283} 3284 3285// Return false if update struct is of valid type, otherwise flag error and return code from callback 3286static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) { 3287 switch (pUpdateStruct->sType) { 3288 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3289 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3290 return false; 3291 default: 3292 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3293 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", 3294 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", 3295 string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType); 3296 } 3297} 3298 3299// Set count for given update struct in the last parameter 3300static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) { 3301 switch (pUpdateStruct->sType) { 3302 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3303 return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount; 3304 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3305 // TODO : Need to understand this case better and make sure code is correct 3306 return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount; 3307 default: 3308 return 0; 3309 } 3310} 3311 3312// For given layout and update, return the first overall index of the layout that is updated 3313static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding, 3314 const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) { 3315 return getBindingStartIndex(pLayout, binding) + arrayIndex; 3316} 3317 3318// For given layout and update, return the last overall index of the layout that is updated 3319static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding, 3320 const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) { 3321 uint32_t count = getUpdateCount(my_data, device, pUpdateStruct); 3322 return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1; 3323} 3324 3325// Verify that the descriptor type in the update struct matches what's expected by the layout 3326static bool validateUpdateConsistency(layer_data *my_data, const VkDevice 
device, const LAYOUT_NODE *pLayout, 3327 const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) { 3328 // First get actual type of update 3329 bool skipCall = false; 3330 VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM; 3331 uint32_t i = 0; 3332 switch (pUpdateStruct->sType) { 3333 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3334 actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType; 3335 break; 3336 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3337 /* no need to validate */ 3338 return false; 3339 break; 3340 default: 3341 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3342 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", 3343 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", 3344 string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType); 3345 } 3346 if (!skipCall) { 3347 // Set first stageFlags as reference and verify that all other updates match it 3348 VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex]; 3349 for (i = startIndex; i <= endIndex; i++) { 3350 if (pLayout->descriptorTypes[i] != actualType) { 3351 skipCall |= log_msg( 3352 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3353 DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS", 3354 "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!", 3355 string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i])); 3356 } 3357 if (pLayout->stageFlags[i] != refStageFlags) { 3358 skipCall |= log_msg( 3359 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3360 DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS", 3361 "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!", 3362 refStageFlags, pLayout->stageFlags[i]); 3363 } 3364 } 3365 } 3366 return skipCall; 3367} 3368 3369// Determine the update type, allocate a new struct of that type, shadow the given pUpdate 3370// struct into the pNewNode param. Return true if error condition encountered and callback signals early exit. 
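// The shadow must deep-copy the type-specific arrays (pImageInfo, pTexelBufferView,
// or pBufferInfo) in addition to the top-level struct, because the application may
// free or reuse its arrays as soon as vkUpdateDescriptorSets() returns; the switch
// below allocates per-type copies for exactly that reason.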
3371// NOTE : Calls to this function should be wrapped in mutex 3372static bool shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) { 3373 bool skipCall = false; 3374 VkWriteDescriptorSet *pWDS = NULL; 3375 VkCopyDescriptorSet *pCDS = NULL; 3376 switch (pUpdate->sType) { 3377 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 3378 pWDS = new VkWriteDescriptorSet; 3379 *pNewNode = (GENERIC_HEADER *)pWDS; 3380 memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet)); 3381 3382 switch (pWDS->descriptorType) { 3383 case VK_DESCRIPTOR_TYPE_SAMPLER: 3384 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: 3385 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: 3386 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { 3387 VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount]; 3388 memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo)); 3389 pWDS->pImageInfo = info; 3390 } break; 3391 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: 3392 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { 3393 VkBufferView *info = new VkBufferView[pWDS->descriptorCount]; 3394 memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView)); 3395 pWDS->pTexelBufferView = info; 3396 } break; 3397 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: 3398 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: 3399 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: 3400 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { 3401 VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount]; 3402 memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo)); 3403 pWDS->pBufferInfo = info; 3404 } break; 3405 default: 3406 return true; 3407 break; 3408 } 3409 break; 3410 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 3411 pCDS = new VkCopyDescriptorSet; 3412 *pNewNode = (GENERIC_HEADER *)pCDS; 3413 memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet)); 3414 break; 3415 default: 3416 if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 3417 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", 3418 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", 3419 string_VkStructureType(pUpdate->sType), pUpdate->sType)) 3420 return true; 3421 } 3422 // Make sure that pNext for the end of shadow copy is NULL 3423 (*pNewNode)->pNext = NULL; 3424 return skipCall; 3425} 3426 3427// Verify that given sampler is valid 3428static bool validateSampler(const layer_data *my_data, const VkSampler *pSampler, const bool immutable) { 3429 bool skipCall = false; 3430 auto sampIt = my_data->sampleMap.find(*pSampler); 3431 if (sampIt == my_data->sampleMap.end()) { 3432 if (!immutable) { 3433 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, 3434 (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS", 3435 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64, 3436 (uint64_t)*pSampler); 3437 } else { // immutable 3438 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, 3439 (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS", 3440 "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable " 3441 "sampler %#" PRIxLEAST64, 3442 (uint64_t)*pSampler); 3443 } 3444 } else { 3445 // TODO : Any further checks we want to do on the sampler? 
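// One candidate for a further check (a sketch only, not implemented): compare the
// stored SAMPLER_NODE's create info against device limits, e.g. flag anisotropy
// requests that exceed VkPhysicalDeviceLimits::maxSamplerAnisotropy.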
3446 } 3447 return skipCall; 3448} 3449 3450//TODO: Consolidate functions 3451bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) { 3452 layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map); 3453 if (!(imgpair.subresource.aspectMask & aspectMask)) { 3454 return false; 3455 } 3456 VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask; 3457 imgpair.subresource.aspectMask = aspectMask; 3458 auto imgsubIt = pCB->imageLayoutMap.find(imgpair); 3459 if (imgsubIt == pCB->imageLayoutMap.end()) { 3460 return false; 3461 } 3462 if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) { 3463 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3464 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3465 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s", 3466 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout)); 3467 } 3468 if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) { 3469 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3470 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3471 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s", 3472 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout)); 3473 } 3474 node = imgsubIt->second; 3475 return true; 3476} 3477 3478bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) { 3479 if (!(imgpair.subresource.aspectMask & aspectMask)) { 3480 return false; 3481 } 3482 VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask; 3483 imgpair.subresource.aspectMask = aspectMask; 3484 auto imgsubIt = my_data->imageLayoutMap.find(imgpair); 3485 if (imgsubIt == my_data->imageLayoutMap.end()) { 3486 return false; 3487 } 3488 if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) { 3489 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3490 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 3491 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s", 3492 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout)); 3493 } 3494 layout = imgsubIt->second.layout; 3495 return true; 3496} 3497 3498// find layout(s) on the cmd buf level 3499bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) { 3500 ImageSubresourcePair imgpair = {image, true, range}; 3501 node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM); 3502 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT); 3503 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT); 3504 FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT); 3505 FindLayout(pCB, imgpair, node, 
VK_IMAGE_ASPECT_METADATA_BIT); 3506 if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) { 3507 imgpair = {image, false, VkImageSubresource()}; 3508 auto imgsubIt = pCB->imageLayoutMap.find(imgpair); 3509 if (imgsubIt == pCB->imageLayoutMap.end()) 3510 return false; 3511 node = imgsubIt->second; 3512 } 3513 return true; 3514} 3515 3516// find layout(s) on the global level 3517bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) { 3518 layout = VK_IMAGE_LAYOUT_MAX_ENUM; 3519 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT); 3520 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT); 3521 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT); 3522 FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT); 3523 if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) { 3524 imgpair = {imgpair.image, false, VkImageSubresource()}; 3525 auto imgsubIt = my_data->imageLayoutMap.find(imgpair); 3526 if (imgsubIt == my_data->imageLayoutMap.end()) 3527 return false; 3528 layout = imgsubIt->second.layout; 3529 } 3530 return true; 3531} 3532 3533bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) { 3534 ImageSubresourcePair imgpair = {image, true, range}; 3535 return FindLayout(my_data, imgpair, layout); 3536} 3537 3538bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) { 3539 auto sub_data = my_data->imageSubresourceMap.find(image); 3540 if (sub_data == my_data->imageSubresourceMap.end()) 3541 return false; 3542 auto imgIt = my_data->imageMap.find(image); 3543 if (imgIt == my_data->imageMap.end()) 3544 return false; 3545 bool ignoreGlobal = false; 3546 // TODO: Make this robust for >1 aspect mask. Now it will just say ignore 3547 // potential errors in this case. 3548 if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) { 3549 ignoreGlobal = true; 3550 } 3551 for (auto imgsubpair : sub_data->second) { 3552 if (ignoreGlobal && !imgsubpair.hasSubresource) 3553 continue; 3554 auto img_data = my_data->imageLayoutMap.find(imgsubpair); 3555 if (img_data != my_data->imageLayoutMap.end()) { 3556 layouts.push_back(img_data->second.layout); 3557 } 3558 } 3559 return true; 3560} 3561 3562// Set the layout on the global level 3563void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) { 3564 VkImage &image = imgpair.image; 3565 // TODO (mlentine): Maybe set format if new? Not used atm. 3566 my_data->imageLayoutMap[imgpair].layout = layout; 3567 // TODO (mlentine): Maybe make vector a set? 3568 auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair); 3569 if (subresource == my_data->imageSubresourceMap[image].end()) { 3570 my_data->imageSubresourceMap[image].push_back(imgpair); 3571 } 3572} 3573 3574// Set the layout on the cmdbuf level 3575void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) { 3576 pCB->imageLayoutMap[imgpair] = node; 3577 // TODO (mlentine): Maybe make vector a set? 
3578 auto subresource = 3579 std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair); 3580 if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) { 3581 pCB->imageSubresourceMap[imgpair.image].push_back(imgpair); 3582 } 3583} 3584 3585void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) { 3586 // TODO (mlentine): Maybe make vector a set? 3587 if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) != 3588 pCB->imageSubresourceMap[imgpair.image].end()) { 3589 pCB->imageLayoutMap[imgpair].layout = layout; 3590 } else { 3591 // TODO (mlentine): Could be expensive and might need to be removed. 3592 assert(imgpair.hasSubresource); 3593 IMAGE_CMD_BUF_LAYOUT_NODE node; 3594 if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) { 3595 node.initialLayout = layout; 3596 } 3597 SetLayout(pCB, imgpair, {node.initialLayout, layout}); 3598 } 3599} 3600 3601template <class OBJECT, class LAYOUT> 3602void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) { 3603 if (imgpair.subresource.aspectMask & aspectMask) { 3604 imgpair.subresource.aspectMask = aspectMask; 3605 SetLayout(pObject, imgpair, layout); 3606 } 3607} 3608 3609template <class OBJECT, class LAYOUT> 3610void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) { 3611 ImageSubresourcePair imgpair = {image, true, range}; 3612 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT); 3613 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT); 3614 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT); 3615 SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT); 3616} 3617 3618template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) { 3619 ImageSubresourcePair imgpair = {image, false, VkImageSubresource()}; 3620 SetLayout(pObject, image, imgpair, layout); 3621} 3622 3623void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) { 3624 auto image_view_data = dev_data->imageViewMap.find(imageView); 3625 assert(image_view_data != dev_data->imageViewMap.end()); 3626 const VkImage &image = image_view_data->second.image; 3627 const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange; 3628 // TODO: Do not iterate over every possibility - consolidate where possible 3629 for (uint32_t j = 0; j < subRange.levelCount; j++) { 3630 uint32_t level = subRange.baseMipLevel + j; 3631 for (uint32_t k = 0; k < subRange.layerCount; k++) { 3632 uint32_t layer = subRange.baseArrayLayer + k; 3633 VkImageSubresource sub = {subRange.aspectMask, level, layer}; 3634 SetLayout(pCB, image, sub, layout); 3635 } 3636 } 3637} 3638 3639// Verify that given imageView is valid 3640static bool validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) { 3641 bool skipCall = false; 3642 auto ivIt = my_data->imageViewMap.find(*pImageView); 3643 if (ivIt == my_data->imageViewMap.end()) { 3644 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3645 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", 3646 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64, 3647 
(uint64_t)*pImageView); 3648 } else { 3649 // Validate that imageLayout is compatible with aspectMask and image format 3650 VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask; 3651 VkImage image = ivIt->second.image; 3652 // TODO : Check here in case we have a bad image 3653 VkFormat format = VK_FORMAT_MAX_ENUM; 3654 auto imgIt = my_data->imageMap.find(image); 3655 if (imgIt != my_data->imageMap.end()) { 3656 format = (*imgIt).second.createInfo.format; 3657 } else { 3658 // Also need to check the swapchains. 3659 auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image); 3660 if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) { 3661 VkSwapchainKHR swapchain = swapchainIt->second; 3662 auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain); 3663 if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) { 3664 SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second; 3665 format = pswapchain_node->createInfo.imageFormat; 3666 } 3667 } 3668 } 3669 if (format == VK_FORMAT_MAX_ENUM) { 3670 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 3671 (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", 3672 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64 3673 " in imageView %#" PRIxLEAST64, 3674 (uint64_t)image, (uint64_t)*pImageView); 3675 } else { 3676 bool ds = vk_format_is_depth_or_stencil(format); 3677 switch (imageLayout) { 3678 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: 3679 // Only Color bit must be set 3680 if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) { 3681 skipCall |= 3682 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3683 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS", 3684 "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL " 3685 "and imageView %#" PRIxLEAST64 "" 3686 " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.", 3687 (uint64_t)*pImageView); 3688 } 3689 // format must NOT be DS 3690 if (ds) { 3691 skipCall |= 3692 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3693 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", 3694 "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL " 3695 "and imageView %#" PRIxLEAST64 "" 3696 " but the image format is %s which is not a color format.", 3697 (uint64_t)*pImageView, string_VkFormat(format)); 3698 } 3699 break; 3700 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: 3701 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: 3702 // Depth or stencil bit must be set, but both must NOT be set 3703 if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) { 3704 if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) { 3705 // both must NOT be set 3706 skipCall |= 3707 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, 3708 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS", 3709 "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 "" 3710 " that has both STENCIL and DEPTH aspects set", 3711 (uint64_t)*pImageView); 3712 } 3713 } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) { 3714 // Neither were set 3715 skipCall |= 3716 log_msg(my_data->report_data, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3717 (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3718 "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3719 " that does not have STENCIL or DEPTH aspect set.",
3720 string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3721 }
3722 // format must be DS
3723 if (!ds) {
3724 skipCall |=
3725 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3726 (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3727 "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3728 " but the image format is %s which is not a depth/stencil format.",
3729 string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3730 }
3731 break;
3732 default:
3733 // anything to check for other layouts?
3734 break;
3735 }
3736 }
3737 }
3738 return skipCall;
3739}
3740
3741// Verify that given bufferView is valid
3742static bool validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3743 bool skipCall = false;
3744 auto bufferViewIt = my_data->bufferViewMap.find(*pBufferView);
3745 if (bufferViewIt == my_data->bufferViewMap.end()) {
3746 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3747 (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3748 "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3749 (uint64_t)*pBufferView);
3750 } else {
3751 // TODO : Any further checks we want to do on the bufferView?
3752 }
3753 return skipCall;
3754}
3755
3756// Verify that given bufferInfo is valid
3757static bool validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3758 bool skipCall = false;
3759 auto bufferIt = my_data->bufferMap.find(pBufferInfo->buffer);
3760 if (bufferIt == my_data->bufferMap.end()) {
3761 skipCall |=
3762 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3763 (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3764 "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3765 (uint64_t)pBufferInfo->buffer);
3766 } else {
3767 // TODO : Any further checks we want to do on the buffer?
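// A natural follow-on check (a sketch only, not implemented here) would be to
// validate that the update stays inside the buffer, along these lines:
//
//     auto const &ci = bufferIt->second.createInfo;
//     if (pBufferInfo->offset >= ci.size ||
//         (pBufferInfo->range != VK_WHOLE_SIZE && pBufferInfo->range > ci.size - pBufferInfo->offset)) {
//         // flag DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR via log_msg()
//     }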
3768 } 3769 return skipCall; 3770} 3771 3772static bool validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS, 3773 const VkDescriptorSetLayoutBinding *pLayoutBinding) { 3774 bool skipCall = false; 3775 // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied 3776 const VkSampler *pSampler = NULL; 3777 bool immutable = false; 3778 uint32_t i = 0; 3779 // For given update type, verify that update contents are correct 3780 switch (pWDS->descriptorType) { 3781 case VK_DESCRIPTOR_TYPE_SAMPLER: 3782 for (i = 0; i < pWDS->descriptorCount; ++i) { 3783 skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable); 3784 } 3785 break; 3786 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: 3787 for (i = 0; i < pWDS->descriptorCount; ++i) { 3788 if (NULL == pLayoutBinding->pImmutableSamplers) { 3789 pSampler = &(pWDS->pImageInfo[i].sampler); 3790 if (immutable) { 3791 skipCall |= log_msg( 3792 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, 3793 (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS", 3794 "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64 3795 ", but previous update(s) from this " 3796 "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either " 3797 "use immutable or non-immutable samplers.", 3798 i, (uint64_t)*pSampler); 3799 } 3800 } else { 3801 if (i > 0 && !immutable) { 3802 skipCall |= log_msg( 3803 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, 3804 (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS", 3805 "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this " 3806 "VkWriteDescriptorSet struct used a non-immutable sampler. 
All updates from a single struct must either " 3807 "use immutable or non-immutable samplers.", 3808 i); 3809 } 3810 immutable = true; 3811 pSampler = &(pLayoutBinding->pImmutableSamplers[i]); 3812 } 3813 skipCall |= validateSampler(my_data, pSampler, immutable); 3814 } 3815 // Intentionally fall through here to also validate image stuff 3816 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: 3817 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: 3818 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: 3819 for (i = 0; i < pWDS->descriptorCount; ++i) { 3820 skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout); 3821 } 3822 break; 3823 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: 3824 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: 3825 for (i = 0; i < pWDS->descriptorCount; ++i) { 3826 skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i])); 3827 } 3828 break; 3829 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: 3830 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: 3831 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: 3832 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: 3833 for (i = 0; i < pWDS->descriptorCount; ++i) { 3834 skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i])); 3835 } 3836 break; 3837 default: 3838 break; 3839 } 3840 return skipCall; 3841} 3842// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer 3843// func_str is the name of the calling function 3844// Return false if no errors occur 3845// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain) 3846static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) { 3847 bool skip_call = false; 3848 auto set_node = my_data->setMap.find(set); 3849 if (set_node == my_data->setMap.end()) { 3850 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3851 (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS", 3852 "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(), 3853 (uint64_t)(set)); 3854 } else { 3855 if (set_node->second->in_use.load()) { 3856 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 3857 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE, 3858 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.", 3859 func_str.c_str(), (uint64_t)(set)); 3860 } 3861 } 3862 return skip_call; 3863} 3864static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) { 3865 // Flag any CBs this set is bound to as INVALID 3866 for (auto cb : pSet->boundCmdBuffers) { 3867 auto cb_node = dev_data->commandBufferMap.find(cb); 3868 if (cb_node != dev_data->commandBufferMap.end()) { 3869 cb_node->second->state = CB_INVALID; 3870 } 3871 } 3872} 3873// update DS mappings based on write and copy update arrays 3874static bool dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS, 3875 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) { 3876 bool skipCall = false; 3877 3878 LAYOUT_NODE *pLayout = NULL; 3879 VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL; 3880 // Validate Write updates 3881 uint32_t i = 0; 3882 for (i = 0; i < descriptorWriteCount; i++) { 3883 VkDescriptorSet ds = pWDS[i].dstSet; 3884 SET_NODE *pSet = my_data->setMap[ds]; 3885 // Set being updated cannot 
be in-flight 3886 if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == true) 3887 return skipCall; 3888 // If set is bound to any cmdBuffers, mark them invalid 3889 invalidateBoundCmdBuffers(my_data, pSet); 3890 GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i]; 3891 pLayout = pSet->pLayout; 3892 // First verify valid update struct 3893 if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == true) { 3894 break; 3895 } 3896 uint32_t binding = 0, endIndex = 0; 3897 binding = pWDS[i].dstBinding; 3898 auto bindingToIndex = pLayout->bindingToIndexMap.find(binding); 3899 // Make sure that layout being updated has the binding being updated 3900 if (bindingToIndex == pLayout->bindingToIndexMap.end()) { 3901 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3902 (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS", 3903 "Descriptor Set %" PRIu64 " does not have binding to match " 3904 "update binding %u for update type " 3905 "%s!", 3906 (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType)); 3907 } else { 3908 // Next verify that update falls within size of given binding 3909 endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate); 3910 if (getBindingEndIndex(pLayout, binding) < endIndex) { 3911 pLayoutCI = &pLayout->createInfo; 3912 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} "); 3913 skipCall |= 3914 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3915 (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS", 3916 "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!", 3917 string_VkStructureType(pUpdate->sType), binding, DSstr.c_str()); 3918 } else { // TODO : should we skip update on a type mismatch or force it? 3919 uint32_t startIndex; 3920 startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate); 3921 // Layout bindings match w/ update, now verify that update type 3922 // & stageFlags are the same for entire update 3923 if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == false) { 3924 // The update is within bounds and consistent, but need to 3925 // make sure contents make sense as well 3926 if ((skipCall = validateUpdateContents(my_data, &pWDS[i], 3927 &pLayout->createInfo.pBindings[bindingToIndex->second])) == false) { 3928 // Update is good. 
Save the update info 3929 // Create new update struct for this set's shadow copy 3930 GENERIC_HEADER *pNewNode = NULL; 3931 skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode); 3932 if (NULL == pNewNode) { 3933 skipCall |= log_msg( 3934 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3935 (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", 3936 "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptorSets()"); 3937 } else { 3938 // Insert shadow node into LL of updates for this set 3939 pNewNode->pNext = pSet->pUpdateStructs; 3940 pSet->pUpdateStructs = pNewNode; 3941 // Now update appropriate descriptor(s) to point to new Update node 3942 for (uint32_t j = startIndex; j <= endIndex; j++) { 3943 assert(j < pSet->descriptorCount); 3944 pSet->pDescriptorUpdates[j] = pNewNode; 3945 } 3946 } 3947 } 3948 } 3949 } 3950 } 3951 } 3952 // Now validate copy updates 3953 for (i = 0; i < descriptorCopyCount; ++i) { 3954 SET_NODE *pSrcSet = NULL, *pDstSet = NULL; 3955 LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL; 3956 uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0; 3957 // For each copy make sure that update falls within given layout and that types match 3958 pSrcSet = my_data->setMap[pCDS[i].srcSet]; 3959 pDstSet = my_data->setMap[pCDS[i].dstSet]; 3960 // Set being updated cannot be in-flight 3961 if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == true) 3962 return skipCall; 3963 invalidateBoundCmdBuffers(my_data, pDstSet); 3964 pSrcLayout = pSrcSet->pLayout; 3965 pDstLayout = pDstSet->pLayout; 3966 // Validate that src binding is valid for src set layout 3967 if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) { 3968 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3969 (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS", 3970 "Copy descriptor update %u has srcBinding %u " 3971 "which is out of bounds for underlying SetLayout " 3972 "%#" PRIxLEAST64 " which only has bindings 0-%u.", 3973 i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1); 3974 } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) { 3975 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3976 (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS", 3977 "Copy descriptor update %u has dstBinding %u " 3978 "which is out of bounds for underlying SetLayout " 3979 "%#" PRIxLEAST64 " which only has bindings 0-%u.", 3980 i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1); 3981 } else { 3982 // Proceed with validation. 
Bindings are ok, but make sure update is within bounds of given layout 3983 srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement, 3984 (const GENERIC_HEADER *)&(pCDS[i])); 3985 dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement, 3986 (const GENERIC_HEADER *)&(pCDS[i])); 3987 if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) { 3988 pLayoutCI = &pSrcLayout->createInfo; 3989 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} "); 3990 skipCall |= 3991 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 3992 (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS", 3993 "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!", 3994 pCDS[i].srcBinding, DSstr.c_str()); 3995 } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) { 3996 pLayoutCI = &pDstLayout->createInfo; 3997 string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} "); 3998 skipCall |= 3999 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 4000 (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS", 4001 "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!", 4002 pCDS[i].dstBinding, DSstr.c_str()); 4003 } else { 4004 srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement, 4005 (const GENERIC_HEADER *)&(pCDS[i])); 4006 dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement, 4007 (const GENERIC_HEADER *)&(pCDS[i])); 4008 for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) { 4009 // For copy just make sure that the types match and then perform the update 4010 if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) { 4011 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 4012 __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS", 4013 "Copy descriptor update index %u, update count #%u, has src update descriptor type %s " 4014 "that does not match overlapping dest descriptor type of %s!", 4015 i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]), 4016 string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j])); 4017 } else { 4018 // point dst descriptor at corresponding src descriptor 4019 // TODO : This may be a hole. I believe copy should be its own copy, 4020 // otherwise a subsequent write update to src will incorrectly affect the copy 4021 pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex]; 4022 pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs; 4023 } 4024 } 4025 } 4026 } 4027 } 4028 return skipCall; 4029} 4030 4031// Verify that given pool has descriptors that are being requested for allocation. 
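// As an illustrative example (not from the original source): a pool created with maxSets = 4 and a
// single VkDescriptorPoolSize of {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 8} can satisfy two
// vkAllocateDescriptorSets() calls whose layouts each consume three uniform-buffer descriptors
// (8 - 3 - 3 = 2 remaining), but a third identical request would exhaust
// availableDescriptorTypeCount and trigger the DRAWSTATE_DESCRIPTOR_POOL_EMPTY error below.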
4032// NOTE : Calls to this function should be wrapped in mutex 4033static bool validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count, 4034 const VkDescriptorSetLayout *pSetLayouts) { 4035 bool skipCall = false; 4036 uint32_t i = 0; 4037 uint32_t j = 0; 4038 4039 // Track number of descriptorSets allowable in this pool 4040 if (pPoolNode->availableSets < count) { 4041 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 4042 reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS", 4043 "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64 4044 ". This pool only has %d descriptorSets remaining.", 4045 count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets); 4046 } else { 4047 pPoolNode->availableSets -= count; 4048 } 4049 4050 for (i = 0; i < count; ++i) { 4051 LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]); 4052 if (NULL == pLayout) { 4053 skipCall |= 4054 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 4055 (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 4056 "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call", 4057 (uint64_t)pSetLayouts[i]); 4058 } else { 4059 uint32_t typeIndex = 0, poolSizeCount = 0; 4060 for (j = 0; j < pLayout->createInfo.bindingCount; ++j) { 4061 typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType); 4062 poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount; 4063 if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) { 4064 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4065 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__, 4066 DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS", 4067 "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64 4068 ". 
This pool only has %d descriptors of this type remaining.", 4069 poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType), 4070 (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]); 4071 } else { // Decrement available descriptors of this type 4072 pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount; 4073 } 4074 } 4075 } 4076 } 4077 return skipCall; 4078} 4079 4080// Free the shadowed update node for this Set 4081// NOTE : Calls to this function should be wrapped in mutex 4082static void freeShadowUpdateTree(SET_NODE *pSet) { 4083 GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs; 4084 pSet->pUpdateStructs = NULL; 4085 GENERIC_HEADER *pFreeUpdate = pShadowUpdate; 4086 // Clear the descriptor mappings as they will now be invalid 4087 pSet->pDescriptorUpdates.clear(); 4088 while (pShadowUpdate) { 4089 pFreeUpdate = pShadowUpdate; 4090 pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext; 4091 VkWriteDescriptorSet *pWDS = NULL; 4092 switch (pFreeUpdate->sType) { 4093 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: 4094 pWDS = (VkWriteDescriptorSet *)pFreeUpdate; 4095 switch (pWDS->descriptorType) { 4096 case VK_DESCRIPTOR_TYPE_SAMPLER: 4097 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: 4098 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: 4099 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { 4100 delete[] pWDS->pImageInfo; 4101 } break; 4102 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: 4103 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { 4104 delete[] pWDS->pTexelBufferView; 4105 } break; 4106 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: 4107 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: 4108 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: 4109 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { 4110 delete[] pWDS->pBufferInfo; 4111 } break; 4112 default: 4113 break; 4114 } 4115 break; 4116 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: 4117 break; 4118 default: 4119 assert(0); 4120 break; 4121 } 4122 delete pFreeUpdate; 4123 } 4124} 4125 4126// Free all DS Pools including their Sets & related sub-structs 4127// NOTE : Calls to this function should be wrapped in mutex 4128static void deletePools(layer_data *my_data) { 4129 if (my_data->descriptorPoolMap.size() <= 0) 4130 return; 4131 for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) { 4132 SET_NODE *pSet = (*ii).second->pSets; 4133 SET_NODE *pFreeSet = pSet; 4134 while (pSet) { 4135 pFreeSet = pSet; 4136 pSet = pSet->pNext; 4137 // Freeing layouts handled in deleteLayouts() function 4138 // Free Update shadow struct tree 4139 freeShadowUpdateTree(pFreeSet); 4140 delete pFreeSet; 4141 } 4142 delete (*ii).second; 4143 } 4144 my_data->descriptorPoolMap.clear(); 4145} 4146 4147// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid 4148// NOTE : Calls to this function should be wrapped in mutex 4149static void deleteLayouts(layer_data *my_data) { 4150 if (my_data->descriptorSetLayoutMap.size() <= 0) 4151 return; 4152 for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) { 4153 LAYOUT_NODE *pLayout = (*ii).second; 4154 if (pLayout->createInfo.pBindings) { 4155 for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) { 4156 delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers; 4157 } 4158 delete[] pLayout->createInfo.pBindings; 4159 } 4160 delete pLayout; 4161 } 4162 my_data->descriptorSetLayoutMap.clear(); 4163} 4164 4165// Currently clearing a set 
is removing all previous updates to that set 4166// TODO : Validate if this is correct clearing behavior 4167static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) { 4168 SET_NODE *pSet = getSetNode(my_data, set); 4169 if (!pSet) { 4170 // TODO : Return error 4171 } else { 4172 freeShadowUpdateTree(pSet); 4173 } 4174} 4175 4176static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool, 4177 VkDescriptorPoolResetFlags flags) { 4178 DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool); 4179 if (!pPool) { 4180 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 4181 (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS", 4182 "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool); 4183 } else { 4184 // TODO: validate flags 4185 // For every set off of this pool, clear it, remove from setMap, and free SET_NODE 4186 SET_NODE *pSet = pPool->pSets; 4187 SET_NODE *pFreeSet = pSet; 4188 while (pSet) { 4189 clearDescriptorSet(my_data, pSet->set); 4190 my_data->setMap.erase(pSet->set); 4191 pFreeSet = pSet; 4192 pSet = pSet->pNext; 4193 delete pFreeSet; 4194 } 4195 pPool->pSets = nullptr; 4196 // Reset available count for each type and available sets for this pool 4197 for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) { 4198 pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i]; 4199 } 4200 pPool->availableSets = pPool->maxSets; 4201 } 4202} 4203 4204// For given CB object, fetch associated CB Node from map 4205static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) { 4206 if (my_data->commandBufferMap.count(cb) == 0) { 4207 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4208 reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4209 "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb)); 4210 return NULL; 4211 } 4212 return my_data->commandBufferMap[cb]; 4213} 4214 4215// Free all CB Nodes 4216// NOTE : Calls to this function should be wrapped in mutex 4217static void deleteCommandBuffers(layer_data *my_data) { 4218 if (my_data->commandBufferMap.empty()) { 4219 return; 4220 } 4221 for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) { 4222 delete (*ii).second; 4223 } 4224 my_data->commandBufferMap.clear(); 4225} 4226 4227static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) { 4228 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4229 (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS", 4230 "You must call vkBeginCommandBuffer() before this call to %s", caller_name); 4231} 4232 4233bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) { 4234 if (!pCB->activeRenderPass) 4235 return false; 4236 bool skip_call = false; 4237 if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) { 4238 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4239 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4240 "Commands cannot be called in a subpass using secondary command buffers."); 4241 } else 
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) { 4242 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4243 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4244 "vkCmdExecuteCommands() cannot be called in a subpass using inline commands."); 4245 } 4246 return skip_call; 4247} 4248 4249static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 4250 if (!(flags & VK_QUEUE_GRAPHICS_BIT)) 4251 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4252 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4253 "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name); 4254 return false; 4255} 4256 4257static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 4258 if (!(flags & VK_QUEUE_COMPUTE_BIT)) 4259 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4260 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4261 "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name); 4262 return false; 4263} 4264 4265static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) { 4266 if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT))) 4267 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4268 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 4269 "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name); 4270 return false; 4271} 4272 4273// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not 4274// in the recording state or if there's an issue with the Cmd ordering 4275static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) { 4276 bool skipCall = false; 4277 auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool); 4278 if (pool_data != my_data->commandPoolMap.end()) { 4279 VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags; 4280 switch (cmd) { 4281 case CMD_BINDPIPELINE: 4282 case CMD_BINDPIPELINEDELTA: 4283 case CMD_BINDDESCRIPTORSETS: 4284 case CMD_FILLBUFFER: 4285 case CMD_CLEARCOLORIMAGE: 4286 case CMD_SETEVENT: 4287 case CMD_RESETEVENT: 4288 case CMD_WAITEVENTS: 4289 case CMD_BEGINQUERY: 4290 case CMD_ENDQUERY: 4291 case CMD_RESETQUERYPOOL: 4292 case CMD_COPYQUERYPOOLRESULTS: 4293 case CMD_WRITETIMESTAMP: 4294 skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str()); 4295 break; 4296 case CMD_SETVIEWPORTSTATE: 4297 case CMD_SETSCISSORSTATE: 4298 case CMD_SETLINEWIDTHSTATE: 4299 case CMD_SETDEPTHBIASSTATE: 4300 case CMD_SETBLENDSTATE: 4301 case CMD_SETDEPTHBOUNDSSTATE: 4302 case CMD_SETSTENCILREADMASKSTATE: 4303 case CMD_SETSTENCILWRITEMASKSTATE: 4304 case CMD_SETSTENCILREFERENCESTATE: 4305 case CMD_BINDINDEXBUFFER: 4306 case CMD_BINDVERTEXBUFFER: 4307 case CMD_DRAW: 4308 case CMD_DRAWINDEXED: 4309 case CMD_DRAWINDIRECT: 4310 case CMD_DRAWINDEXEDINDIRECT: 4311 case CMD_BLITIMAGE: 4312 case CMD_CLEARATTACHMENTS: 4313 case CMD_CLEARDEPTHSTENCILIMAGE: 4314 case CMD_RESOLVEIMAGE: 4315 case CMD_BEGINRENDERPASS: 4316 case CMD_NEXTSUBPASS: 4317 case CMD_ENDRENDERPASS: 4318 skipCall |= checkGraphicsBit(my_data, 
flags, cmdTypeToString(cmd).c_str()); 4319 break; 4320 case CMD_DISPATCH: 4321 case CMD_DISPATCHINDIRECT: 4322 skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str()); 4323 break; 4324 case CMD_COPYBUFFER: 4325 case CMD_COPYIMAGE: 4326 case CMD_COPYBUFFERTOIMAGE: 4327 case CMD_COPYIMAGETOBUFFER: 4328 case CMD_CLONEIMAGEDATA: 4329 case CMD_UPDATEBUFFER: 4330 case CMD_PIPELINEBARRIER: 4331 case CMD_EXECUTECOMMANDS: 4332 break; 4333 default: 4334 break; 4335 } 4336 } 4337 if (pCB->state != CB_RECORDING) { 4338 skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name); } else { 4339 skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd); 4340 CMD_NODE cmdNode = {}; 4341 // init cmd node and append to end of cmd LL 4342 cmdNode.cmdNumber = ++pCB->numCmds; 4343 cmdNode.type = cmd; 4344 pCB->cmds.push_back(cmdNode); 4345 } 4346 return skipCall; 4347} 4348// Reset the command buffer state 4349// Maintain the createInfo and set state to CB_NEW, but clear all other state 4350static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) { 4351 GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb]; 4352 if (pCB) { 4353 pCB->in_use.store(0); 4354 pCB->cmds.clear(); 4355 // Reset CB state (note that createInfo is not cleared) 4356 pCB->commandBuffer = cb; 4357 memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo)); 4358 memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo)); 4359 pCB->numCmds = 0; 4360 memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t)); 4361 pCB->state = CB_NEW; 4362 pCB->submitCount = 0; 4363 pCB->status = 0; 4364 pCB->viewports.clear(); 4365 pCB->scissors.clear(); 4366 4367 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 4368 // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets 4369 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 4370 auto set_node = dev_data->setMap.find(set); 4371 if (set_node != dev_data->setMap.end()) { 4372 set_node->second->boundCmdBuffers.erase(pCB->commandBuffer); 4373 } 4374 } 4375 pCB->lastBound[i].reset(); 4376 } 4377 4378 memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo)); 4379 pCB->activeRenderPass = 0; 4380 pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE; 4381 pCB->activeSubpass = 0; 4382 pCB->lastSubmittedFence = VK_NULL_HANDLE; 4383 pCB->lastSubmittedQueue = VK_NULL_HANDLE; 4384 pCB->destroyedSets.clear(); 4385 pCB->updatedSets.clear(); 4386 pCB->destroyedFramebuffers.clear(); 4387 pCB->waitedEvents.clear(); 4388 pCB->semaphores.clear(); 4389 pCB->events.clear(); 4390 pCB->waitedEventsBeforeQueryReset.clear(); 4391 pCB->queryToStateMap.clear(); 4392 pCB->activeQueries.clear(); 4393 pCB->startedQueries.clear(); 4394 pCB->imageSubresourceMap.clear(); 4395 pCB->imageLayoutMap.clear(); 4396 pCB->eventToStageMap.clear(); 4397 pCB->drawData.clear(); 4398 pCB->currentDrawData.buffers.clear(); 4399 pCB->primaryCommandBuffer = VK_NULL_HANDLE; 4400 // Make sure any secondaryCommandBuffers are removed from globalInFlight 4401 for (auto secondary_cb : pCB->secondaryCommandBuffers) { 4402 dev_data->globalInFlightCmdBuffers.erase(secondary_cb); 4403 } 4404 pCB->secondaryCommandBuffers.clear(); 4405 pCB->updateImages.clear(); 4406 pCB->updateBuffers.clear(); 4407 clear_cmd_buf_and_mem_references(dev_data, pCB); 4408 pCB->eventUpdates.clear(); 4409 4410 // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list 4411 for (auto framebuffer : pCB->framebuffers) { 4412 auto fbNode = 
dev_data->frameBufferMap.find(framebuffer); 4413 if (fbNode != dev_data->frameBufferMap.end()) { 4414 fbNode->second.referencingCmdBuffers.erase(pCB->commandBuffer); 4415 } 4416 } 4417 pCB->framebuffers.clear(); 4418 4419 } 4420} 4421 4422// Set PSO-related status bits for CB, including dynamic state set via PSO 4423static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) { 4424 // Account for any dynamic state not set via this PSO 4425 if (!pPipe->graphicsPipelineCI.pDynamicState || 4426 !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static 4427 pCB->status = CBSTATUS_ALL; 4428 } else { 4429 // First consider all state on 4430 // Then unset any state that's noted as dynamic in PSO 4431 // Finally OR that into CB statemask 4432 CBStatusFlags psoDynStateMask = CBSTATUS_ALL; 4433 for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { 4434 switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) { 4435 case VK_DYNAMIC_STATE_VIEWPORT: 4436 psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET; 4437 break; 4438 case VK_DYNAMIC_STATE_SCISSOR: 4439 psoDynStateMask &= ~CBSTATUS_SCISSOR_SET; 4440 break; 4441 case VK_DYNAMIC_STATE_LINE_WIDTH: 4442 psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET; 4443 break; 4444 case VK_DYNAMIC_STATE_DEPTH_BIAS: 4445 psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET; 4446 break; 4447 case VK_DYNAMIC_STATE_BLEND_CONSTANTS: 4448 psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET; 4449 break; 4450 case VK_DYNAMIC_STATE_DEPTH_BOUNDS: 4451 psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET; 4452 break; 4453 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK: 4454 psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET; 4455 break; 4456 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK: 4457 psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET; 4458 break; 4459 case VK_DYNAMIC_STATE_STENCIL_REFERENCE: 4460 psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET; 4461 break; 4462 default: 4463 // TODO : Flag error here 4464 break; 4465 } 4466 } 4467 pCB->status |= psoDynStateMask; 4468 } 4469} 4470 4471// Print the last bound Gfx Pipeline 4472static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) { 4473 bool skipCall = false; 4474 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb); 4475 if (pCB) { 4476 PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline); 4477 if (!pPipeTrav) { 4478 // nothing to print 4479 } else { 4480 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 4481 __LINE__, DRAWSTATE_NONE, "DS", "%s", 4482 vk_print_vkgraphicspipelinecreateinfo( 4483 reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}") 4484 .c_str()); 4485 } 4486 } 4487 return skipCall; 4488} 4489 4490static void printCB(layer_data *my_data, const VkCommandBuffer cb) { 4491 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb); 4492 if (pCB && pCB->cmds.size() > 0) { 4493 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 4494 DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb); 4495 vector<CMD_NODE> cmds = pCB->cmds; 4496 for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) { 4497 // TODO : Need to pass cb as srcObj here 4498 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 4499 __LINE__, DRAWSTATE_NONE, "DS", " CMD#%" PRIu64 ": %s", (*ii).cmdNumber, 
cmdTypeToString((*ii).type).c_str()); 4500 } 4501 } else { 4502 // Nothing to print 4503 } 4504} 4505 4506static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) { 4507 bool skipCall = false; 4508 if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) { 4509 return skipCall; 4510 } 4511 skipCall |= printPipeline(my_data, cb); 4512 return skipCall; 4513} 4514 4515// Flags validation error if the associated call is made inside a render pass. The apiName 4516// routine should ONLY be called outside a render pass. 4517static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) { 4518 bool inside = false; 4519 if (pCB->activeRenderPass) { 4520 inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4521 (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS", 4522 "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName, 4523 (uint64_t)pCB->activeRenderPass); 4524 } 4525 return inside; 4526} 4527 4528// Flags validation error if the associated call is made outside a render pass. The apiName 4529// routine should ONLY be called inside a render pass. 4530static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) { 4531 bool outside = false; 4532 if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) || 4533 ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) && 4534 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) { 4535 outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4536 (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS", 4537 "%s: This call must be issued inside an active render pass.", apiName); 4538 } 4539 return outside; 4540} 4541 4542static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) { 4543 4544 layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation"); 4545 4546} 4547 4548VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4549vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) { 4550 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); 4551 4552 assert(chain_info->u.pLayerInfo); 4553 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; 4554 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance"); 4555 if (fpCreateInstance == NULL) 4556 return VK_ERROR_INITIALIZATION_FAILED; 4557 4558 // Advance the link info for the next element on the chain 4559 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; 4560 4561 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance); 4562 if (result != VK_SUCCESS) 4563 return result; 4564 4565 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map); 4566 instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable; 4567 layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr); 4568 4569 instance_data->report_data = 4570 
debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, 4571 pCreateInfo->ppEnabledExtensionNames); 4572 4573 init_core_validation(instance_data, pAllocator); 4574 4575 ValidateLayerOrdering(*pCreateInfo); 4576 4577 return result; 4578} 4579 4580/* hook DestroyInstance to remove tableInstanceMap entry */ 4581VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { 4582 // TODOSC : Shouldn't need any customization here 4583 dispatch_key key = get_dispatch_key(instance); 4584 // TBD: Need any locking this early, in case this function is called at the 4585 // same time by more than one thread? 4586 layer_data *my_data = get_my_data_ptr(key, layer_data_map); 4587 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 4588 pTable->DestroyInstance(instance, pAllocator); 4589 4590 std::lock_guard<std::mutex> lock(global_lock); 4591 // Clean up logging callback, if any 4592 while (my_data->logging_callback.size() > 0) { 4593 VkDebugReportCallbackEXT callback = my_data->logging_callback.back(); 4594 layer_destroy_msg_callback(my_data->report_data, callback, pAllocator); 4595 my_data->logging_callback.pop_back(); 4596 } 4597 4598 layer_debug_report_destroy_instance(my_data->report_data); 4599 delete my_data->instance_dispatch_table; 4600 layer_data_map.erase(key); 4601} 4602 4603static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) { 4604 uint32_t i; 4605 // TBD: Need any locking, in case this function is called at the same time 4606 // by more than one thread? 4607 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 4608 dev_data->device_extensions.wsi_enabled = false; 4609 4610 VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table; 4611 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr; 4612 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR"); 4613 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR"); 4614 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR"); 4615 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR"); 4616 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR"); 4617 4618 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) { 4619 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) 4620 dev_data->device_extensions.wsi_enabled = true; 4621 } 4622} 4623 4624VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, 4625 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { 4626 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); 4627 4628 assert(chain_info->u.pLayerInfo); 4629 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; 4630 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr; 4631 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice"); 4632 if (fpCreateDevice == NULL) { 4633 return VK_ERROR_INITIALIZATION_FAILED; 4634 } 4635 4636 // Advance the link info for the next element on the chain 4637 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; 4638 4639 VkResult 
result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice); 4640 if (result != VK_SUCCESS) { 4641 return result; 4642 } 4643 4644 std::unique_lock<std::mutex> lock(global_lock); 4645 layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map); 4646 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map); 4647 4648 // Setup device dispatch table 4649 my_device_data->device_dispatch_table = new VkLayerDispatchTable; 4650 layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr); 4651 my_device_data->device = *pDevice; 4652 4653 my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice); 4654 createDeviceRegisterExtensions(pCreateInfo, *pDevice); 4655 // Get physical device limits for this device 4656 my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties)); 4657 uint32_t count; 4658 my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr); 4659 my_device_data->phys_dev_properties.queue_family_properties.resize(count); 4660 my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties( 4661 gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]); 4662 // TODO: device limits should make sure these are compatible 4663 if (pCreateInfo->pEnabledFeatures) { 4664 my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures; 4665 } else { 4666 memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures)); 4667 } 4668 // Store physical device mem limits into device layer_data struct 4669 my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props); 4670 lock.unlock(); 4671 4672 ValidateLayerOrdering(*pCreateInfo); 4673 4674 return result; 4675} 4676 4677// prototype 4678static void deleteRenderPasses(layer_data *); 4679VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { 4680 // TODOSC : Shouldn't need any customization here 4681 dispatch_key key = get_dispatch_key(device); 4682 layer_data *dev_data = get_my_data_ptr(key, layer_data_map); 4683 // Free all the memory 4684 std::unique_lock<std::mutex> lock(global_lock); 4685 deletePipelines(dev_data); 4686 deleteRenderPasses(dev_data); 4687 deleteCommandBuffers(dev_data); 4688 deletePools(dev_data); 4689 deleteLayouts(dev_data); 4690 dev_data->imageViewMap.clear(); 4691 dev_data->imageMap.clear(); 4692 dev_data->imageSubresourceMap.clear(); 4693 dev_data->imageLayoutMap.clear(); 4694 dev_data->bufferViewMap.clear(); 4695 dev_data->bufferMap.clear(); 4696 // Queues persist until device is destroyed 4697 dev_data->queueMap.clear(); 4698 lock.unlock(); 4699#if MTMERGESOURCE 4700 bool skipCall = false; 4701 lock.lock(); 4702 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 4703 (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()"); 4704 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 4705 (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================"); 4706 print_mem_list(dev_data); 4707 printCBList(dev_data); 4708 // Report any memory leaks 4709 DEVICE_MEM_INFO *pInfo = NULL; 4710 
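// Any DEVICE_MEM_INFO whose allocInfo.allocationSize is still non-zero at this point corresponds to
// a vkAllocateMemory() allocation the application never freed; each one is reported below as a
// MEMTRACK_MEMORY_LEAK error before the dispatch table's DestroyDevice is invoked.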
if (!dev_data->memObjMap.empty()) { 4711 for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) { 4712 pInfo = &(*ii).second; 4713 if (pInfo->allocInfo.allocationSize != 0) { 4714 // Valid Usage: All child objects created on device must have been destroyed prior to destroying device 4715 skipCall |= 4716 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4717 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, 4718 "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling " 4719 "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().", 4720 (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem)); 4721 } 4722 } 4723 } 4724 layer_debug_report_destroy_device(device); 4725 lock.unlock(); 4726 4727#if DISPATCH_MAP_DEBUG 4728 fprintf(stderr, "Device: %p, key: %p\n", device, key); 4729#endif 4730 VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table; 4731 if (!skipCall) { 4732 pDisp->DestroyDevice(device, pAllocator); 4733 } 4734#else 4735 dev_data->device_dispatch_table->DestroyDevice(device, pAllocator); 4736#endif 4737 delete dev_data->device_dispatch_table; 4738 layer_data_map.erase(key); 4739} 4740 4741static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}}; 4742 4743VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4744vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) { 4745 return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties); 4746} 4747 4748VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4749vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) { 4750 return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties); 4751} 4752 4753// TODO: Why does this exist - can we just use global? 
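// (The cv_device_layers array below duplicates the cv_global_layers entry used by
// vkEnumerateInstanceLayerProperties() above; both describe VK_LAYER_LUNARG_core_validation,
// which is what the TODO above is questioning.)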
4754static const VkLayerProperties cv_device_layers[] = {{ 4755 "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer", 4756}}; 4757 4758VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, 4759 const char *pLayerName, uint32_t *pCount, 4760 VkExtensionProperties *pProperties) { 4761 if (pLayerName == NULL) { 4762 dispatch_key key = get_dispatch_key(physicalDevice); 4763 layer_data *my_data = get_my_data_ptr(key, layer_data_map); 4764 return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties); 4765 } else { 4766 return util_GetExtensionProperties(0, NULL, pCount, pProperties); 4767 } 4768} 4769 4770VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 4771vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) { 4772 /* draw_state physical device layers are the same as global */ 4773 return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties); 4774} 4775 4776// This validates that the initial layout specified in the command buffer for 4777// the IMAGE is the same 4778// as the global IMAGE layout 4779static bool ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) { 4780 bool skip_call = false; 4781 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 4782 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 4783 for (auto cb_image_data : pCB->imageLayoutMap) { 4784 VkImageLayout imageLayout; 4785 if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) { 4786 skip_call |= 4787 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 4788 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".", 4789 reinterpret_cast<const uint64_t &>(cb_image_data.first)); 4790 } else { 4791 if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) { 4792 // TODO: Set memory invalid which is in mem_tracker currently 4793 } else if (imageLayout != cb_image_data.second.initialLayout) { 4794 if (cb_image_data.first.hasSubresource) { 4795 skip_call |= log_msg( 4796 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4797 reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 4798 "Cannot submit cmd buffer using image (%" PRIx64 ") [sub-resource: array layer %u, mip level %u], " 4799 "with layout %s when first use is %s.", 4800 reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.arrayLayer, 4801 cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout), 4802 string_VkImageLayout(cb_image_data.second.initialLayout)); 4803 } else { 4804 skip_call |= log_msg( 4805 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 4806 reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 4807 "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when " 4808 "first use is %s.", 4809 reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout), 4810 string_VkImageLayout(cb_image_data.second.initialLayout)); 4811 } 4812 } 4813 SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout); 4814 } 4815 } 4816 return skip_call; 4817} 4818 4819// Track 
which resources are in-flight by atomically incrementing their "in_use" count 4820static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) { 4821 bool skip_call = false; 4822 for (auto drawDataElement : pCB->drawData) { 4823 for (auto buffer : drawDataElement.buffers) { 4824 auto buffer_data = my_data->bufferMap.find(buffer); 4825 if (buffer_data == my_data->bufferMap.end()) { 4826 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 4827 (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS", 4828 "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer)); 4829 } else { 4830 buffer_data->second.in_use.fetch_add(1); 4831 } 4832 } 4833 } 4834 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 4835 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 4836 auto setNode = my_data->setMap.find(set); 4837 if (setNode == my_data->setMap.end()) { 4838 skip_call |= 4839 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 4840 (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS", 4841 "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set)); 4842 } else { 4843 setNode->second->in_use.fetch_add(1); 4844 } 4845 } 4846 } 4847 for (auto semaphore : pCB->semaphores) { 4848 auto semaphoreNode = my_data->semaphoreMap.find(semaphore); 4849 if (semaphoreNode == my_data->semaphoreMap.end()) { 4850 skip_call |= 4851 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 4852 reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS", 4853 "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore)); 4854 } else { 4855 semaphoreNode->second.in_use.fetch_add(1); 4856 } 4857 } 4858 for (auto event : pCB->events) { 4859 auto eventNode = my_data->eventMap.find(event); 4860 if (eventNode == my_data->eventMap.end()) { 4861 skip_call |= 4862 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, 4863 reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS", 4864 "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event)); 4865 } else { 4866 eventNode->second.in_use.fetch_add(1); 4867 } 4868 } 4869 return skip_call; 4870} 4871 4872// Note: This function assumes that the global lock is held by the calling 4873// thread. 
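// Specifically, cleanInFlightCmdBuffer() walks pCB->waitedEventsBeforeQueryReset and flags
// DRAWSTATE_INVALID_QUERY for any query whose reset was guarded by an event that is still
// unsignaled (eventMap[event].needsSignaled == true).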
4874static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) { 4875 bool skip_call = false; 4876 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer); 4877 if (pCB) { 4878 for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) { 4879 for (auto event : queryEventsPair.second) { 4880 if (my_data->eventMap[event].needsSignaled) { 4881 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 4882 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS", 4883 "Cannot get query results on queryPool %" PRIu64 4884 " with index %d which was guarded by unsignaled event %" PRIu64 ".", 4885 (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event)); 4886 } 4887 } 4888 } 4889 } 4890 return skip_call; 4891} 4892// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers 4893static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) { 4894 // Pull it off of global list initially, but if we find it in any other queue list, add it back in 4895 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer); 4896 pCB->in_use.fetch_sub(1); 4897 if (!pCB->in_use.load()) { 4898 dev_data->globalInFlightCmdBuffers.erase(cmd_buffer); 4899 } 4900} 4901 4902static void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) { 4903 GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer); 4904 for (auto drawDataElement : pCB->drawData) { 4905 for (auto buffer : drawDataElement.buffers) { 4906 auto buffer_data = my_data->bufferMap.find(buffer); 4907 if (buffer_data != my_data->bufferMap.end()) { 4908 buffer_data->second.in_use.fetch_sub(1); 4909 } 4910 } 4911 } 4912 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) { 4913 for (auto set : pCB->lastBound[i].uniqueBoundSets) { 4914 auto setNode = my_data->setMap.find(set); 4915 if (setNode != my_data->setMap.end()) { 4916 setNode->second->in_use.fetch_sub(1); 4917 } 4918 } 4919 } 4920 for (auto semaphore : pCB->semaphores) { 4921 auto semaphoreNode = my_data->semaphoreMap.find(semaphore); 4922 if (semaphoreNode != my_data->semaphoreMap.end()) { 4923 semaphoreNode->second.in_use.fetch_sub(1); 4924 } 4925 } 4926 for (auto event : pCB->events) { 4927 auto eventNode = my_data->eventMap.find(event); 4928 if (eventNode != my_data->eventMap.end()) { 4929 eventNode->second.in_use.fetch_sub(1); 4930 } 4931 } 4932 for (auto queryStatePair : pCB->queryToStateMap) { 4933 my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second; 4934 } 4935 for (auto eventStagePair : pCB->eventToStageMap) { 4936 my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second; 4937 } 4938} 4939// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call 4940// decrementResources for all priorFences and cmdBuffers associated with fence. 
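// For example, once a fence passed to vkQueueSubmit() is waited on (via a queue, device, or fence
// wait, per the tracking notes below), calling this with that single fence retires its command
// buffers and, by recursing through priorFences, those of every earlier fence on the same queue.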
4941static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) { 4942 bool skip_call = false; 4943 for (uint32_t i = 0; i < fenceCount; ++i) { 4944 auto fence_data = my_data->fenceMap.find(pFences[i]); 4945 if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled) 4946 return skip_call; 4947 fence_data->second.needsSignaled = false; 4948 fence_data->second.in_use.fetch_sub(1); 4949 decrementResources(my_data, static_cast<uint32_t>(fence_data->second.priorFences.size()), 4950 fence_data->second.priorFences.data()); 4951 for (auto cmdBuffer : fence_data->second.cmdBuffers) { 4952 decrementResources(my_data, cmdBuffer); 4953 skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer); 4954 removeInFlightCmdBuffer(my_data, cmdBuffer); 4955 } 4956 } 4957 return skip_call; 4958} 4959// Decrement in_use for all outstanding cmd buffers that were submitted on this queue 4960static bool decrementResources(layer_data *my_data, VkQueue queue) { 4961 bool skip_call = false; 4962 auto queue_data = my_data->queueMap.find(queue); 4963 if (queue_data != my_data->queueMap.end()) { 4964 for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) { 4965 decrementResources(my_data, cmdBuffer); 4966 skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer); 4967 removeInFlightCmdBuffer(my_data, cmdBuffer); 4968 } 4969 queue_data->second.untrackedCmdBuffers.clear(); 4970 skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()), 4971 queue_data->second.lastFences.data()); 4972 } 4973 return skip_call; 4974} 4975 4976// This function merges command buffer tracking between queues when there is a semaphore dependency 4977// between them (see below for details as to how tracking works). When this happens, the prior 4978// fences from the signaling queue are merged into the wait queue as well as any untracked command 4979// buffers. 4980static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) { 4981 if (queue == other_queue) { 4982 return; 4983 } 4984 auto queue_data = dev_data->queueMap.find(queue); 4985 auto other_queue_data = dev_data->queueMap.find(other_queue); 4986 if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) { 4987 return; 4988 } 4989 for (auto fenceInner : other_queue_data->second.lastFences) { 4990 queue_data->second.lastFences.push_back(fenceInner); 4991 } 4992 if (fence != VK_NULL_HANDLE) { 4993 auto fence_data = dev_data->fenceMap.find(fence); 4994 if (fence_data == dev_data->fenceMap.end()) { 4995 return; 4996 } 4997 for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) { 4998 fence_data->second.cmdBuffers.push_back(cmdbuffer); 4999 } 5000 other_queue_data->second.untrackedCmdBuffers.clear(); 5001 } else { 5002 for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) { 5003 queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer); 5004 } 5005 other_queue_data->second.untrackedCmdBuffers.clear(); 5006 } 5007 for (auto eventStagePair : other_queue_data->second.eventToStageMap) { 5008 queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second; 5009 } 5010} 5011 5012// This is the core function for tracking command buffers. There are two primary ways command 5013// buffers are tracked. When submitted they are stored in the command buffer list associated 5014// with a fence or the untracked command buffer list associated with a queue if no fence is used. 
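// (For example, vkQueueSubmit(queue, 1, &submit, fence) files the submitted command buffers under
// fenceMap[fence].cmdBuffers, while the same call with fence == VK_NULL_HANDLE appends them to
// queueMap[queue].untrackedCmdBuffers instead.)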
5015// Each queue also stores the last fence that was submitted onto the queue. This allows us to 5016// create a linked list of fences and their associated command buffers so if one fence is 5017// waited on, prior fences on that queue are also considered to have been waited on. When a fence is 5018// waited on (either via a queue, device or fence), we free the cmd buffers for that fence and 5019// recursively call with the prior fences. 5020static void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, 5021 VkFence fence) { 5022 auto queue_data = my_data->queueMap.find(queue); 5023 if (fence != VK_NULL_HANDLE) { 5024 vector<VkFence> prior_fences; 5025 auto fence_data = my_data->fenceMap.find(fence); 5026 if (fence_data == my_data->fenceMap.end()) { 5027 return; 5028 } 5029 fence_data->second.cmdBuffers.clear(); 5030 if (queue_data != my_data->queueMap.end()) { 5031 prior_fences = queue_data->second.lastFences; 5032 queue_data->second.lastFences.clear(); 5033 queue_data->second.lastFences.push_back(fence); 5034 for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) { 5035 fence_data->second.cmdBuffers.push_back(cmdbuffer); 5036 } 5037 queue_data->second.untrackedCmdBuffers.clear(); 5038 } 5039 fence_data->second.priorFences = prior_fences; 5040 fence_data->second.needsSignaled = true; 5041 fence_data->second.queue = queue; 5042 fence_data->second.in_use.fetch_add(1); 5043 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 5044 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 5045 for (uint32_t i = 0; i < submit->commandBufferCount; ++i) { 5046 for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) { 5047 fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer); 5048 } 5049 fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]); 5050 } 5051 } 5052 } else { 5053 if (queue_data != my_data->queueMap.end()) { 5054 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 5055 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 5056 for (uint32_t i = 0; i < submit->commandBufferCount; ++i) { 5057 for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) { 5058 queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer); 5059 } 5060 queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]); 5061 } 5062 } 5063 } 5064 } 5065} 5066 5067static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, 5068 VkFence fence) { 5069 auto queue_data = my_data->queueMap.find(queue); 5070 if (queue_data != my_data->queueMap.end()) { 5071 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 5072 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 5073 for (uint32_t i = 0; i < submit->commandBufferCount; ++i) { 5074 // Add cmdBuffers to the global set and increment count 5075 GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]); 5076 for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) { 5077 my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer); 5078 GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer); 5079 pSubCB->in_use.fetch_add(1); 5080 } 5081 my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]); 5082 pCB->in_use.fetch_add(1); 5083 } 5084 } 5085 } 5086} 5087 5088static 
bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 5089 bool skip_call = false; 5090 if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) && 5091 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { 5092 skip_call |= 5093 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 5094 __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS", 5095 "Command Buffer %#" PRIx64 " is already in use and is not marked for simultaneous use.", 5096 reinterpret_cast<uint64_t>(pCB->commandBuffer)); 5097 } 5098 return skip_call; 5099} 5100 5101static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 5102 bool skipCall = false; 5103 // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once 5104 if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) { 5105 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 5106 __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS", 5107 "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT " 5108 "set, but has been submitted %#" PRIxLEAST64 " times.", 5109 (uint64_t)(pCB->commandBuffer), pCB->submitCount); 5110 } 5111 // Validate that cmd buffers have been updated 5112 if (CB_RECORDED != pCB->state) { 5113 if (CB_INVALID == pCB->state) { 5114 // Inform app of reason CB invalid 5115 bool causeReported = false; 5116 if (!pCB->destroyedSets.empty()) { 5117 std::stringstream set_string; 5118 for (auto set : pCB->destroyedSets) 5119 set_string << " " << set; 5120 5121 skipCall |= 5122 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5123 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 5124 "You are submitting command buffer %#" PRIxLEAST64 5125 " that is invalid because it had the following bound descriptor set(s) destroyed: %s", 5126 (uint64_t)(pCB->commandBuffer), set_string.str().c_str()); 5127 causeReported = true; 5128 } 5129 if (!pCB->updatedSets.empty()) { 5130 std::stringstream set_string; 5131 for (auto set : pCB->updatedSets) 5132 set_string << " " << set; 5133 5134 skipCall |= 5135 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5136 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 5137 "You are submitting command buffer %#" PRIxLEAST64 5138 " that is invalid because it had the following bound descriptor set(s) updated: %s", 5139 (uint64_t)(pCB->commandBuffer), set_string.str().c_str()); 5140 causeReported = true; 5141 } 5142 if (!pCB->destroyedFramebuffers.empty()) { 5143 std::stringstream fb_string; 5144 for (auto fb : pCB->destroyedFramebuffers) 5145 fb_string << " " << fb; 5146 5147 skipCall |= 5148 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5149 reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 5150 "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following " 5151 "referenced framebuffers destroyed: %s", 5152 reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str()); 5153 causeReported = true; 5154 } 5155 // TODO : This is defensive programming to make sure an error is 5156 // flagged if we 
hit this INVALID cmd buffer case and none of the 5157 // above cases are hit. As the number of INVALID cases grows, this 5158 // code should be updated to seamlessly handle all the cases. 5159 if (!causeReported) { 5160 skipCall |= log_msg( 5161 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5162 reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 5163 "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation " 5164 "should " 5165 "be improved to report the exact cause.", 5166 reinterpret_cast<uint64_t &>(pCB->commandBuffer)); 5167 } 5168 } else { // Flag error for using CB w/o vkEndCommandBuffer() called 5169 skipCall |= 5170 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5171 (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS", 5172 "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!", 5173 (uint64_t)(pCB->commandBuffer)); 5174 } 5175 } 5176 return skipCall; 5177} 5178 5179static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 5180 // Track in-use for resources off of primary and any secondary CBs 5181 bool skipCall = validateAndIncrementResources(dev_data, pCB); 5182 if (!pCB->secondaryCommandBuffers.empty()) { 5183 for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) { 5184 skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]); 5185 GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer); 5186 if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) { 5187 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 5188 __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS", 5189 "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64 5190 " but that buffer has subsequently been bound to " 5191 "primary cmd buffer %#" PRIxLEAST64 ".", 5192 reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer), 5193 reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer)); 5194 } 5195 } 5196 } 5197 skipCall |= validateCommandBufferState(dev_data, pCB); 5198 // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing 5199 // on device 5200 skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB); 5201 return skipCall; 5202} 5203 5204VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 5205vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) { 5206 bool skipCall = false; 5207 GLOBAL_CB_NODE *pCBNode = NULL; 5208 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 5209 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 5210 std::unique_lock<std::mutex> lock(global_lock); 5211 // First verify that fence is not in use 5212 if (fence != VK_NULL_HANDLE) { 5213 dev_data->fenceMap[fence].queue = queue; 5214 if ((submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) { 5215 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5216 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", 5217 "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence)); 5218 } 5219 if (!dev_data->fenceMap[fence].needsSignaled) { 5220 skipCall |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5221 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM", 5222 "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted", 5223 reinterpret_cast<uint64_t &>(fence)); 5224 } 5225 } 5226 // TODO : Review these old print functions and clean up as appropriate 5227 print_mem_list(dev_data); 5228 printCBList(dev_data); 5229 // Update cmdBuffer-related data structs and mark fence in-use 5230 trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence); 5231 // Now verify each individual submit 5232 std::unordered_set<VkQueue> processed_other_queues; 5233 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { 5234 const VkSubmitInfo *submit = &pSubmits[submit_idx]; 5235 vector<VkSemaphore> semaphoreList; 5236 for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { 5237 const VkSemaphore &semaphore = submit->pWaitSemaphores[i]; 5238 semaphoreList.push_back(semaphore); 5239 if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) { 5240 if (dev_data->semaphoreMap[semaphore].signaled) { 5241 dev_data->semaphoreMap[semaphore].signaled = false; 5242 } else { 5243 skipCall |= 5244 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 5245 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 5246 "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.", 5247 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore)); 5248 } 5249 const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue; 5250 if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) { 5251 updateTrackedCommandBuffers(dev_data, queue, other_queue, fence); 5252 processed_other_queues.insert(other_queue); 5253 } 5254 } 5255 } 5256 for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { 5257 const VkSemaphore &semaphore = submit->pSignalSemaphores[i]; 5258 if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) { 5259 semaphoreList.push_back(semaphore); 5260 if (dev_data->semaphoreMap[semaphore].signaled) { 5261 skipCall |= 5262 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 5263 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS", 5264 "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64 5265 " that has already been signaled but not waited on by queue %#" PRIx64 ".", 5266 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore), 5267 reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue)); 5268 } else { 5269 dev_data->semaphoreMap[semaphore].signaled = true; 5270 dev_data->semaphoreMap[semaphore].queue = queue; 5271 } 5272 } 5273 } 5274 for (uint32_t i = 0; i < submit->commandBufferCount; i++) { 5275 skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]); 5276 pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]); 5277 if (pCBNode) { 5278 pCBNode->semaphores = semaphoreList; 5279 pCBNode->submitCount++; // increment submit count 5280 pCBNode->lastSubmittedFence = fence; 5281 pCBNode->lastSubmittedQueue = queue; 5282 skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode); 5283 // Call submit-time functions to validate/update state 5284 for (auto 
&function : pCBNode->validate_functions) { 5285 skipCall |= function(); 5286 } 5287 for (auto &function : pCBNode->eventUpdates) { 5288 skipCall |= function(queue); 5289 } 5290 } 5291 } 5292 } 5293 markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence); 5294 lock.unlock(); 5295 if (!skipCall) 5296 result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence); 5297 5298 return result; 5299} 5300 5301#if MTMERGESOURCE 5302VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, 5303 const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) { 5304 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5305 VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory); 5306 // TODO : Track allocations and overall size here 5307 std::lock_guard<std::mutex> lock(global_lock); 5308 add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo); 5309 print_mem_list(my_data); 5310 return result; 5311} 5312 5313VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5314vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) { 5315 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5316 5317 // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed. 5318 // Before freeing a memory object, an application must ensure the memory object is no longer 5319 // in use by the device—for example by command buffers queued for execution. The memory need 5320 // not yet be unbound from all images and buffers, but any further use of those images or 5321 // buffers (on host or device) for anything other than destroying those objects will result in 5322 // undefined behavior. 5323 5324 std::unique_lock<std::mutex> lock(global_lock); 5325 freeMemObjInfo(my_data, device, mem, false); 5326 print_mem_list(my_data); 5327 printCBList(my_data); 5328 lock.unlock(); 5329 my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator); 5330} 5331 5332static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { 5333 bool skipCall = false; 5334 5335 if (size == 0) { 5336 // TODO: a size of 0 is not listed as an invalid use in the spec, should it be? 
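// Illustrative trigger for the warning below (device, mem, and ptr are placeholder
// handles, not variables from this file):
//     void *ptr = nullptr;
//     vkMapMemory(device, mem, 0 /*offset*/, 0 /*size*/, 0 /*flags*/, &ptr);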
5337 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5338 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 5339 "VkMapMemory: Attempting to map memory range of size zero"); 5340 } 5341 5342 auto mem_element = my_data->memObjMap.find(mem); 5343 if (mem_element != my_data->memObjMap.end()) { 5344 // It is an application error to call VkMapMemory on an object that is already mapped 5345 if (mem_element->second.memRange.size != 0) { 5346 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5347 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 5348 "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem); 5349 } 5350 5351 // Validate that offset + size is within object's allocationSize 5352 if (size == VK_WHOLE_SIZE) { 5353 if (offset >= mem_element->second.allocInfo.allocationSize) { 5354 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5355 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, 5356 "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64, offset, 5357 mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize); 5358 } 5359 } else { 5360 if ((offset + size) > mem_element->second.allocInfo.allocationSize) { 5361 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5362 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, 5363 "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64, offset, 5364 size + offset, mem_element->second.allocInfo.allocationSize); 5365 } 5366 } 5367 } 5368 return skipCall; 5369} 5370 5371static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) { 5372 auto mem_element = my_data->memObjMap.find(mem); 5373 if (mem_element != my_data->memObjMap.end()) { 5374 MemRange new_range; 5375 new_range.offset = offset; 5376 new_range.size = size; 5377 mem_element->second.memRange = new_range; 5378 } 5379} 5380 5381static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) { 5382 bool skipCall = false; 5383 auto mem_element = my_data->memObjMap.find(mem); 5384 if (mem_element != my_data->memObjMap.end()) { 5385 if (!mem_element->second.memRange.size) { 5386 // Valid Usage: memory must currently be mapped 5387 skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 5388 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 5389 "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem); 5390 } 5391 mem_element->second.memRange.size = 0; 5392 if (mem_element->second.pData) { 5393 free(mem_element->second.pData); 5394 mem_element->second.pData = 0; 5395 } 5396 } 5397 return skipCall; 5398} 5399 5400static char NoncoherentMemoryFillValue = 0xb; 5401 5402static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) { 5403 auto mem_element = dev_data->memObjMap.find(mem); 5404 if (mem_element != dev_data->memObjMap.end()) { 5405 mem_element->second.pDriverData = *ppData; 5406 uint32_t index = mem_element->second.allocInfo.memoryTypeIndex; 5407 if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) { 5408 
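// Host-coherent memory needs no layer-side shadow copy: leave pData null and hand the
// driver's pointer back to the application unchanged.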
mem_element->second.pData = 0; 5409 } else { 5410 if (size == VK_WHOLE_SIZE) { 5411 size = mem_element->second.allocInfo.allocationSize; 5412 } 5413 size_t convSize = (size_t)(size); 5414 mem_element->second.pData = malloc(2 * convSize); 5415 memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize); 5416 *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2); 5417 } 5418 } 5419} 5420#endif 5421// Verify that state for fence being waited on is appropriate. That is, 5422// a fence being waited on should not already be signalled and 5423// it should have been submitted on a queue or during acquire next image 5424static inline bool verifyWaitFenceState(VkDevice device, VkFence fence, const char *apiCall) { 5425 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5426 bool skipCall = false; 5427 auto pFenceInfo = my_data->fenceMap.find(fence); 5428 if (pFenceInfo != my_data->fenceMap.end()) { 5429 if (!pFenceInfo->second.firstTimeFlag) { 5430 if (!pFenceInfo->second.needsSignaled) { 5431 skipCall |= 5432 log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5433 (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM", 5434 "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence); 5435 } 5436 if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence 5437 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5438 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM", 5439 "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during " 5440 "acquire next image.", 5441 apiCall, reinterpret_cast<uint64_t &>(fence)); 5442 } 5443 } else { 5444 pFenceInfo->second.firstTimeFlag = false; 5445 } 5446 } 5447 return skipCall; 5448} 5449 5450VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 5451vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) { 5452 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5453 bool skip_call = false; 5454 // Verify fence status of submitted fences 5455 std::unique_lock<std::mutex> lock(global_lock); 5456 for (uint32_t i = 0; i < fenceCount; i++) { 5457 skip_call |= verifyWaitFenceState(device, pFences[i], "vkWaitForFences"); 5458 } 5459 lock.unlock(); 5460 if (skip_call) 5461 return VK_ERROR_VALIDATION_FAILED_EXT; 5462 5463 VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout); 5464 5465 if (result == VK_SUCCESS) { 5466 lock.lock(); 5467 // When we know that all fences are complete we can clean/remove their CBs 5468 if (waitAll || fenceCount == 1) { 5469 skip_call |= decrementResources(dev_data, fenceCount, pFences); 5470 } 5471 // NOTE : Alternate case not handled here is when some fences have completed. In 5472 // this case for app to guarantee which fences completed it will have to call 5473 // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete. 
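// For example (hypothetical call): vkWaitForFences(device, 2, fences, VK_FALSE, timeout)
// returning VK_SUCCESS only guarantees that at least one of the two fences signaled, so
// neither fence's command buffers can safely be retired at this point.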
lock.unlock(); 5475 } 5476 if (skip_call) 5477 return VK_ERROR_VALIDATION_FAILED_EXT; 5478 return result; 5479} 5480 5481VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) { 5482 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5483 bool skipCall = false; 5484 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 5485 std::unique_lock<std::mutex> lock(global_lock); 5486 skipCall = verifyWaitFenceState(device, fence, "vkGetFenceStatus"); 5487 lock.unlock(); 5488 5489 if (skipCall) 5490 return result; 5491 5492 result = dev_data->device_dispatch_table->GetFenceStatus(device, fence); 5493 bool skip_call = false; 5494 lock.lock(); 5495 if (result == VK_SUCCESS) { 5496 skip_call |= decrementResources(dev_data, 1, &fence); 5497 } 5498 lock.unlock(); 5499 if (skip_call) 5500 return VK_ERROR_VALIDATION_FAILED_EXT; 5501 return result; 5502} 5503 5504VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, 5505 VkQueue *pQueue) { 5506 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5507 dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue); 5508 std::lock_guard<std::mutex> lock(global_lock); 5509 5510 // Add queue to tracking set only if it is new 5511 auto result = dev_data->queues.emplace(*pQueue); 5512 if (result.second == true) { 5513 QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue]; 5514 pQNode->device = device; 5515 } 5516} 5517 5518VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) { 5519 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 5520 bool skip_call = false; 5521 skip_call |= decrementResources(dev_data, queue); 5522 if (skip_call) 5523 return VK_ERROR_VALIDATION_FAILED_EXT; 5524 VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue); 5525 return result; 5526} 5527 5528VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) { 5529 bool skip_call = false; 5530 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5531 std::unique_lock<std::mutex> lock(global_lock); 5532 for (auto queue : dev_data->queues) { 5533 skip_call |= decrementResources(dev_data, queue); 5534 } 5535 dev_data->globalInFlightCmdBuffers.clear(); 5536 lock.unlock(); 5537 if (skip_call) 5538 return VK_ERROR_VALIDATION_FAILED_EXT; 5539 VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device); 5540 return result; 5541} 5542 5543VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) { 5544 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5545 bool skipCall = false; 5546 std::unique_lock<std::mutex> lock(global_lock); 5547 auto fence_pair = dev_data->fenceMap.find(fence); 5548 if (fence_pair != dev_data->fenceMap.end()) { 5549 if (fence_pair->second.in_use.load()) { 5550 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 5551 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", 5552 "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence)); 5553 } 5554 dev_data->fenceMap.erase(fence_pair); 5555 } 5556 lock.unlock(); 5557 5558 if (!skipCall) 5559 dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator); 5560} 5561 5562
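// Note on the destroy entry points below (descriptive comment, added for clarity): each
// tracked object's in_use counter is bumped while a submission referencing it is in
// flight, so destroying a semaphore, event, or fence that a pending submission still
// references is reported as an error.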
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5563 vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) { 5564 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5565 dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator); 5566 std::lock_guard<std::mutex> lock(global_lock); 5567 auto item = dev_data->semaphoreMap.find(semaphore); 5568 if (item != dev_data->semaphoreMap.end()) { 5569 if (item->second.in_use.load()) { 5570 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, 5571 reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS", 5572 "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore)); 5573 } 5574 dev_data->semaphoreMap.erase(semaphore); 5575 } 5576 // TODO : Clean up any internal data structures using this obj. 5577} 5578 5579VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) { 5580 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5581 bool skip_call = false; 5582 std::unique_lock<std::mutex> lock(global_lock); 5583 auto event_data = dev_data->eventMap.find(event); 5584 if (event_data != dev_data->eventMap.end()) { 5585 if (event_data->second.in_use.load()) { 5586 skip_call |= log_msg( 5587 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, 5588 reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS", 5589 "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event)); 5590 } 5591 dev_data->eventMap.erase(event_data); 5592 } 5593 lock.unlock(); 5594 if (!skip_call) 5595 dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator); 5596 // TODO : Clean up any internal data structures using this obj. 5597} 5598 5599VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5600vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) { 5601 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5602 ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator); 5603 // TODO : Clean up any internal data structures using this obj.
5604} 5605 5606VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, 5607 uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, 5608 VkQueryResultFlags flags) { 5609 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5610 unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight; 5611 GLOBAL_CB_NODE *pCB = nullptr; 5612 std::unique_lock<std::mutex> lock(global_lock); 5613 for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) { 5614 pCB = getCBNode(dev_data, cmdBuffer); 5615 for (auto queryStatePair : pCB->queryToStateMap) { 5616 queriesInFlight[queryStatePair.first].push_back(cmdBuffer); 5617 } 5618 } 5619 bool skip_call = false; 5620 for (uint32_t i = 0; i < queryCount; ++i) { 5621 QueryObject query = {queryPool, firstQuery + i}; 5622 auto queryElement = queriesInFlight.find(query); 5623 auto queryToStateElement = dev_data->queryToStateMap.find(query); 5624 if (queryToStateElement != dev_data->queryToStateMap.end()) { 5625 // Available and in flight 5626 if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() && 5627 queryToStateElement->second) { 5628 for (auto cmdBuffer : queryElement->second) { 5629 pCB = getCBNode(dev_data, cmdBuffer); 5630 auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query); 5631 if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) { 5632 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5633 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5634 "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.", 5635 (uint64_t)(queryPool), firstQuery + i); 5636 } else { 5637 for (auto event : queryEventElement->second) { 5638 dev_data->eventMap[event].needsSignaled = true; 5639 } 5640 } 5641 } 5642 // Unavailable and in flight 5643 } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() && 5644 !queryToStateElement->second) { 5645 // TODO : Can there be the same query in use by multiple command buffers in flight? 
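// (descriptive note) The loop below ORs per-command-buffer availability: if some in-flight
// CB will make the query available, the read is accepted only when the caller also passed
// VK_QUERY_RESULT_WAIT_BIT or VK_QUERY_RESULT_PARTIAL_BIT; otherwise it is flagged.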
5646 bool make_available = false; 5647 for (auto cmdBuffer : queryElement->second) { 5648 pCB = getCBNode(dev_data, cmdBuffer); 5649 make_available |= pCB->queryToStateMap[query]; 5650 } 5651 if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) { 5652 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5653 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5654 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.", 5655 (uint64_t)(queryPool), firstQuery + i); 5656 } 5657 // Unavailable 5658 } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) { 5659 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5660 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5661 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.", 5662 (uint64_t)(queryPool), firstQuery + i); 5663 // Uninitialized 5664 } else if (queryToStateElement == dev_data->queryToStateMap.end()) { 5665 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 5666 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 5667 "Cannot get query results on queryPool %" PRIu64 5668 " with index %d as data has not been collected for this index.", 5669 (uint64_t)(queryPool), firstQuery + i); 5670 } 5671 } 5672 } 5673 lock.unlock(); 5674 if (skip_call) 5675 return VK_ERROR_VALIDATION_FAILED_EXT; 5676 return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, 5677 flags); 5678} 5679 5680static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) { 5681 bool skip_call = false; 5682 auto buffer_data = my_data->bufferMap.find(buffer); 5683 if (buffer_data == my_data->bufferMap.end()) { 5684 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 5685 (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS", 5686 "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer)); 5687 } else { 5688 if (buffer_data->second.in_use.load()) { 5689 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, 5690 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS", 5691 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer)); 5692 } 5693 } 5694 return skip_call; 5695} 5696 5697VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5698vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) { 5699 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5700 bool skipCall = false; 5701 std::unique_lock<std::mutex> lock(global_lock); 5702 if (!validateIdleBuffer(dev_data, buffer) && !skipCall) { 5703 lock.unlock(); 5704 dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator); 5705 lock.lock(); 5706 } 5707 dev_data->bufferMap.erase(buffer); 5708} 5709 5710VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5711vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) { 5712 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5713 dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator); 5714
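// Only layer bookkeeping remains past this point: drop the view from bufferViewMap so a
// stale entry cannot be matched against a recycled handle value.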
std::lock_guard<std::mutex> lock(global_lock); 5715 auto item = dev_data->bufferViewMap.find(bufferView); 5716 if (item != dev_data->bufferViewMap.end()) { 5717 dev_data->bufferViewMap.erase(item); 5718 } 5719} 5720 5721VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) { 5722 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5723 bool skipCall = false; 5724 if (!skipCall) 5725 dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator); 5726 5727 std::lock_guard<std::mutex> lock(global_lock); 5728 const auto& entry = dev_data->imageMap.find(image); 5729 if (entry != dev_data->imageMap.end()) { 5730 // Clear any memory mapping for this image 5731 auto mem_entry = dev_data->memObjMap.find(entry->second.mem); 5732 if (mem_entry != dev_data->memObjMap.end()) 5733 mem_entry->second.image = VK_NULL_HANDLE; 5734 5735 // Remove image from imageMap 5736 dev_data->imageMap.erase(entry); 5737 } 5738 const auto& subEntry = dev_data->imageSubresourceMap.find(image); 5739 if (subEntry != dev_data->imageSubresourceMap.end()) { 5740 for (const auto& pair : subEntry->second) { 5741 dev_data->imageLayoutMap.erase(pair); 5742 } 5743 dev_data->imageSubresourceMap.erase(subEntry); 5744 } 5745} 5746#if MTMERGESOURCE 5747static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle, 5748 VkDebugReportObjectTypeEXT object_type) { 5749 if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) { 5750 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0, 5751 MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle, 5752 other_handle); 5753 } else { 5754 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0, 5755 MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle, 5756 other_handle); 5757 } 5758} 5759 5760static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range, 5761 VkDebugReportObjectTypeEXT object_type) { 5762 bool skip_call = false; 5763 5764 for (auto range : ranges) { 5765 if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) < 5766 (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1))) 5767 continue; 5768 if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) > 5769 (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1))) 5770 continue; 5771 skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type); 5772 } 5773 return skip_call; 5774} 5775 5776static bool validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset, 5777 VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges, 5778 const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) { 5779 MEMORY_RANGE range; 5780 range.handle = handle; 5781 range.memory = mem; 5782 range.start = memoryOffset; 5783 range.end = memoryOffset + memRequirements.size - 1; 5784 ranges.push_back(range); 5785 return validate_memory_range(dev_data, other_ranges, range, object_type); 5786} 5787 5788
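// (illustrative note) vkBindBufferMemory below validates that memoryOffset is a multiple
// of VkMemoryRequirements::alignment as well as of the relevant minimum device-limit
// alignments. For example, if memRequirements.alignment were 256:
//     vkBindBufferMemory(device, buffer, mem, 0);    // OK: 0 is a multiple of 256
//     vkBindBufferMemory(device, buffer, mem, 256);  // OK
//     vkBindBufferMemory(device, buffer, mem, 128);  // flagged below: 128 % 256 != 0
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL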
5789vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) { 5790 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5791 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 5792 std::unique_lock<std::mutex> lock(global_lock); 5793 // Track objects tied to memory 5794 uint64_t buffer_handle = (uint64_t)(buffer); 5795 bool skipCall = 5796 set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory"); 5797 auto buffer_node = dev_data->bufferMap.find(buffer); 5798 if (buffer_node != dev_data->bufferMap.end()) { 5799 buffer_node->second.mem = mem; 5800 VkMemoryRequirements memRequirements; 5801 dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements); 5802 skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements, 5803 dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges, 5804 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT); 5805 // Validate memory requirements alignment 5806 if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) { 5807 skipCall |= 5808 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, 5809 __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS", 5810 "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be an integer multiple of the " 5811 "VkMemoryRequirements::alignment value %#" PRIxLEAST64 5812 ", returned from a call to vkGetBufferMemoryRequirements with buffer", 5813 memoryOffset, memRequirements.alignment); 5814 } 5815 // Validate device limits alignments 5816 VkBufferUsageFlags usage = dev_data->bufferMap[buffer].createInfo.usage; 5817 if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) { 5818 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) { 5819 skipCall |= 5820 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5821 0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS", 5822 "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of " 5823 "device limit minTexelBufferOffsetAlignment %#" PRIxLEAST64, 5824 memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment); 5825 } 5826 } 5827 if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) { 5828 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 5829 0) { 5830 skipCall |= 5831 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5832 0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS", 5833 "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of " 5834 "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64, 5835 memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment); 5836 } 5837 } 5838 if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) { 5839 if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 5840 0) { 5841 skipCall |= 5842 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 5843 0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS", 5844 "vkBindBufferMemory(): 
memoryOffset is %#" PRIxLEAST64 " but must be a multiple of " 5845 "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64, 5846 memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment); 5847 } 5848 } 5849 } 5850 print_mem_list(dev_data); 5851 lock.unlock(); 5852 if (!skipCall) { 5853 result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset); 5854 } 5855 return result; 5856} 5857 5858VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5859vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) { 5860 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5861 // TODO : What to track here? 5862 // Could potentially save returned mem requirements and validate values passed into BindBufferMemory 5863 my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements); 5864} 5865 5866VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5867vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) { 5868 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5869 // TODO : What to track here? 5870 // Could potentially save returned mem requirements and validate values passed into BindImageMemory 5871 my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements); 5872} 5873#endif 5874VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5875vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) { 5876 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5877 ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator); 5878 // TODO : Clean up any internal data structures using this obj. 5879} 5880 5881VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5882vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) { 5883 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5884 5885 std::unique_lock<std::mutex> lock(global_lock); 5886 my_data->shaderModuleMap.erase(shaderModule); 5887 lock.unlock(); 5888 5889 my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator); 5890} 5891 5892VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5893vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) { 5894 get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator); 5895 // TODO : Clean up any internal data structures using this obj. 5896} 5897 5898VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5899vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) { 5900 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5901 ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator); 5902 // TODO : Clean up any internal data structures using this obj. 5903} 5904 5905VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5906vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) { 5907 get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator); 5908 // TODO : Clean up any internal data structures using this obj. 
5909} 5910 5911VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5912vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) { 5913 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5914 ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator); 5915 // TODO : Clean up any internal data structures using this obj. 5916} 5917 5918VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5919vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) { 5920 get_my_data_ptr(get_dispatch_key(device), layer_data_map) 5921 ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator); 5922 // TODO : Clean up any internal data structures using this obj. 5923} 5924// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result 5925// If this is a secondary command buffer, then make sure its primary is also in-flight 5926// If primary is not in-flight, then remove secondary from global in-flight set 5927// This function is only valid at a point when cmdBuffer is being reset or freed 5928static bool checkAndClearCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) { 5929 bool skip_call = false; 5930 if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) { 5931 // Primary CB or secondary where primary is also in-flight is an error 5932 if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) || 5933 (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) { 5934 skip_call |= log_msg( 5935 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 5936 reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 5937 "Attempt to %s command buffer (%#" PRIxLEAST64 ") which is in use.", action, 5938 reinterpret_cast<const uint64_t &>(cb_node->commandBuffer)); 5939 } else { // Secondary CB w/o primary in-flight, remove from in-flight 5940 dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer); 5941 } 5942 } 5943 return skip_call; 5944} 5945// Iterate over all cmdBuffers in given commandPool and verify that each is not in use 5946static bool checkAndClearCommandBuffersInFlight(layer_data *dev_data, const VkCommandPool commandPool, const char *action) { 5947 bool skip_call = false; 5948 auto pool_data = dev_data->commandPoolMap.find(commandPool); 5949 if (pool_data != dev_data->commandPoolMap.end()) { 5950 for (auto cmd_buffer : pool_data->second.commandBuffers) { 5951 if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) { 5952 skip_call |= checkAndClearCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action); 5953 } 5954 } 5955 } 5956 return skip_call; 5957} 5958 5959VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 5960vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) { 5961 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5962 5963 bool skip_call = false; 5964 std::unique_lock<std::mutex> lock(global_lock); 5965 for (uint32_t i = 0; i < commandBufferCount; i++) { 5966 auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]); 5967 if (cb_pair != dev_data->commandBufferMap.end()) skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free"); 5968 // Delete CB information structure, and remove from
commandBufferMap 5969 if (cb_pair != dev_data->commandBufferMap.end()) { 5970 // reset prior to delete for data clean-up 5971 resetCB(dev_data, (*cb_pair).second->commandBuffer); 5972 delete (*cb_pair).second; 5973 dev_data->commandBufferMap.erase(cb_pair); 5974 } 5975 5976 // Remove commandBuffer reference from commandPoolMap 5977 dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]); 5978 } 5979#if MTMERGESOURCE 5980 printCBList(dev_data); 5981#endif 5982 lock.unlock(); 5983 5984 if (!skip_call) 5985 dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers); 5986} 5987 5988VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, 5989 const VkAllocationCallbacks *pAllocator, 5990 VkCommandPool *pCommandPool) { 5991 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 5992 5993 VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool); 5994 5995 if (VK_SUCCESS == result) { 5996 std::lock_guard<std::mutex> lock(global_lock); 5997 dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags; 5998 dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex; 5999 } 6000 return result; 6001} 6002 6003VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, 6004 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) { 6005 6006 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6007 VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool); 6008 if (result == VK_SUCCESS) { 6009 std::lock_guard<std::mutex> lock(global_lock); 6010 dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo; 6011 } 6012 return result; 6013} 6014 6015// Destroy commandPool along with all of the commandBuffers allocated from that pool 6016VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6017vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { 6018 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6019 bool skipCall = false; 6020 std::unique_lock<std::mutex> lock(global_lock); 6021 // Verify that command buffers in pool are complete (not in-flight) 6022 VkBool32 result = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with"); 6023 // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandPoolMap 6024 if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) { 6025 for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin(); 6026 poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) { 6027 clear_cmd_buf_and_mem_references(dev_data, *poolCb); 6028 auto del_cb = dev_data->commandBufferMap.find(*poolCb); 6029 delete (*del_cb).second; // delete CB info structure 6030 dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer 6031 poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase( 6032 poolCb); // Remove CB reference from commandPoolMap's list 6033 } 6034 } 6035 dev_data->commandPoolMap.erase(commandPool); 6036 6037 lock.unlock(); 6038 6039 if (result) 6040 return; 6041 6042 if (!skipCall) 6043 
dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator); 6044} 6045 6046VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6047vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) { 6048 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6049 bool skipCall = false; 6050 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 6051 6052 if (checkAndClearCommandBuffersInFlight(dev_data, commandPool, "reset command pool with")) 6053 return VK_ERROR_VALIDATION_FAILED_EXT; 6054 6055 if (!skipCall) 6056 result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags); 6057 6058 // Reset all of the CBs allocated from this pool 6059 if (VK_SUCCESS == result) { 6060 std::lock_guard<std::mutex> lock(global_lock); 6061 auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin(); 6062 while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) { 6063 resetCB(dev_data, (*it)); 6064 ++it; 6065 } 6066 } 6067 return result; 6068} 6069 6070VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) { 6071 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6072 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 6073 bool skipCall = false; 6074 std::unique_lock<std::mutex> lock(global_lock); 6075 for (uint32_t i = 0; i < fenceCount; ++i) { 6076 auto fence_item = dev_data->fenceMap.find(pFences[i]); 6077 if (fence_item != dev_data->fenceMap.end()) { 6078 fence_item->second.needsSignaled = true; 6079 if (fence_item->second.in_use.load()) { 6080 skipCall |= 6081 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, 6082 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", 6083 "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i])); 6084 } 6085 } 6086 } 6087 lock.unlock(); 6088 if (!skipCall) 6089 result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences); 6090 return result; 6091} 6092 6093VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6094vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) { 6095 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6096 std::unique_lock<std::mutex> lock(global_lock); 6097 auto fbNode = dev_data->frameBufferMap.find(framebuffer); 6098 if (fbNode != dev_data->frameBufferMap.end()) { 6099 for (auto cb : fbNode->second.referencingCmdBuffers) { 6100 auto cbNode = dev_data->commandBufferMap.find(cb); 6101 if (cbNode != dev_data->commandBufferMap.end()) { 6102 // Set CB as invalid and record destroyed framebuffer 6103 cbNode->second->state = CB_INVALID; 6104 cbNode->second->destroyedFramebuffers.insert(framebuffer); 6105 } 6106 } 6107 delete [] fbNode->second.createInfo.pAttachments; 6108 dev_data->frameBufferMap.erase(fbNode); 6109 } 6110 lock.unlock(); 6111 dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator); 6112} 6113 6114VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6115vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { 6116 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6117 dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator); 6118 std::lock_guard<std::mutex> lock(global_lock); 
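// Drop the layer's tracking entry for this render pass under the global lock.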
6119 dev_data->renderPassMap.erase(renderPass); 6120} 6121 6122VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, 6123 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) { 6124 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6125 6126 VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer); 6127 6128 if (VK_SUCCESS == result) { 6129 std::lock_guard<std::mutex> lock(global_lock); 6130 // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid 6131 dev_data->bufferMap[*pBuffer].createInfo = *pCreateInfo; 6132 dev_data->bufferMap[*pBuffer].in_use.store(0); 6133 } 6134 return result; 6135} 6136 6137VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo, 6138 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) { 6139 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6140 VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView); 6141 if (VK_SUCCESS == result) { 6142 std::lock_guard<std::mutex> lock(global_lock); 6143 dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo); 6144#if MTMERGESOURCE 6145 // In order to create a valid buffer view, the buffer must have been created with at least one of the 6146 // following flags: UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT 6147 validate_buffer_usage_flags(dev_data, pCreateInfo->buffer, 6148 VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false, 6149 "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT"); 6150#endif 6151 } 6152 return result; 6153} 6154 6155VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, 6156 const VkAllocationCallbacks *pAllocator, VkImage *pImage) { 6157 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6158 6159 VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage); 6160 6161 if (VK_SUCCESS == result) { 6162 std::lock_guard<std::mutex> lock(global_lock); 6163 IMAGE_LAYOUT_NODE image_node; 6164 image_node.layout = pCreateInfo->initialLayout; 6165 image_node.format = pCreateInfo->format; 6166 dev_data->imageMap[*pImage].createInfo = *pCreateInfo; 6167 ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()}; 6168 dev_data->imageSubresourceMap[*pImage].push_back(subpair); 6169 dev_data->imageLayoutMap[subpair] = image_node; 6170 } 6171 return result; 6172} 6173 6174static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) { 6175 /* expects global_lock to be held by caller */ 6176 6177 auto image_node_it = dev_data->imageMap.find(image); 6178 if (image_node_it != dev_data->imageMap.end()) { 6179 /* If the caller used the special values VK_REMAINING_MIP_LEVELS and 6180 * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to 6181 * the actual values. 
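* Example (assuming an image created with mipLevels = 10 and arrayLayers = 6):
*   {baseMipLevel = 2, levelCount = VK_REMAINING_MIP_LEVELS}     resolves levelCount to 8
*   {baseArrayLayer = 1, layerCount = VK_REMAINING_ARRAY_LAYERS} resolves layerCount to 5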
6182 */ 6183 if (range->levelCount == VK_REMAINING_MIP_LEVELS) { 6184 range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel; 6185 } 6186 6187 if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) { 6188 range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer; 6189 } 6190 } 6191} 6192 6193// Return the correct layer/level counts if the caller used the special 6194// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS. 6195static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range, 6196 VkImage image) { 6197 /* expects global_lock to be held by caller */ 6198 6199 *levels = range.levelCount; 6200 *layers = range.layerCount; 6201 auto image_node_it = dev_data->imageMap.find(image); 6202 if (image_node_it != dev_data->imageMap.end()) { 6203 if (range.levelCount == VK_REMAINING_MIP_LEVELS) { 6204 *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel; 6205 } 6206 if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) { 6207 *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer; 6208 } 6209 } 6210} 6211 6212VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo, 6213 const VkAllocationCallbacks *pAllocator, VkImageView *pView) { 6214 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6215 VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView); 6216 if (VK_SUCCESS == result) { 6217 std::lock_guard<std::mutex> lock(global_lock); 6218 VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo); 6219 ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image); 6220 dev_data->imageViewMap[*pView] = localCI; 6221#if MTMERGESOURCE 6222 // Validate that img has correct usage flags set 6223 validate_image_usage_flags(dev_data, pCreateInfo->image, 6224 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | 6225 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 6226 false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT"); 6227#endif 6228 } 6229 return result; 6230} 6231 6232VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6233vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) { 6234 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6235 VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence); 6236 if (VK_SUCCESS == result) { 6237 std::lock_guard<std::mutex> lock(global_lock); 6238 auto &fence_node = dev_data->fenceMap[*pFence]; 6239 fence_node.createInfo = *pCreateInfo; 6240 fence_node.needsSignaled = true; 6241 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) { 6242 fence_node.firstTimeFlag = true; 6243 fence_node.needsSignaled = false; 6244 } 6245 fence_node.in_use.store(0); 6246 } 6247 return result; 6248} 6249 6250// TODO handle pipeline caches 6251VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, 6252 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) { 6253 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6254 VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, 
pAllocator, pPipelineCache); 6255 return result; 6256} 6257 6258VKAPI_ATTR void VKAPI_CALL 6259vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) { 6260 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6261 dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator); 6262} 6263 6264VKAPI_ATTR VkResult VKAPI_CALL 6265vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) { 6266 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6267 VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData); 6268 return result; 6269} 6270 6271VKAPI_ATTR VkResult VKAPI_CALL 6272vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) { 6273 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6274 VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches); 6275 return result; 6276} 6277 6278// utility function to set collective state for pipeline 6279void set_pipeline_state(PIPELINE_NODE *pPipe) { 6280 // If any attachment used by this pipeline has blendEnable, set top-level blendEnable 6281 if (pPipe->graphicsPipelineCI.pColorBlendState) { 6282 for (size_t i = 0; i < pPipe->attachments.size(); ++i) { 6283 if (VK_TRUE == pPipe->attachments[i].blendEnable) { 6284 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 6285 (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 6286 ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 6287 (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 6288 ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 6289 (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) || 6290 ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && 6291 (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) { 6292 pPipe->blendConstantsEnabled = true; 6293 } 6294 } 6295 } 6296 } 6297} 6298 6299VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6300vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, 6301 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, 6302 VkPipeline *pPipelines) { 6303 VkResult result = VK_SUCCESS; 6304 // TODO What to do with pipelineCache? 6305 // The order of operations here is a little convoluted but gets the job done 6306 // 1. Pipeline create state is first shadowed into PIPELINE_NODE struct 6307 // 2. Create state is then validated (which uses flags setup during shadowing) 6308 // 3. 
If everything looks good, we'll then create the pipeline and add NODE to pipelineMap 6309 bool skipCall = false; 6310 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic 6311 vector<PIPELINE_NODE *> pPipeNode(count); 6312 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6313 6314 uint32_t i = 0; 6315 std::unique_lock<std::mutex> lock(global_lock); 6316 6317 for (i = 0; i < count; i++) { 6318 pPipeNode[i] = new PIPELINE_NODE; 6319 pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]); 6320 skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i); 6321 } 6322 6323 if (!skipCall) { 6324 lock.unlock(); 6325 result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, 6326 pPipelines); 6327 lock.lock(); 6328 for (i = 0; i < count; i++) { 6329 pPipeNode[i]->pipeline = pPipelines[i]; 6330 dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i]; 6331 } 6332 lock.unlock(); 6333 } else { 6334 for (i = 0; i < count; i++) { 6335 delete pPipeNode[i]; 6336 } 6337 lock.unlock(); 6338 return VK_ERROR_VALIDATION_FAILED_EXT; 6339 } 6340 return result; 6341} 6342 6343VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6344vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, 6345 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, 6346 VkPipeline *pPipelines) { 6347 VkResult result = VK_SUCCESS; 6348 bool skipCall = false; 6349 6350 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic 6351 vector<PIPELINE_NODE *> pPipeNode(count); 6352 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6353 6354 uint32_t i = 0; 6355 std::unique_lock<std::mutex> lock(global_lock); 6356 for (i = 0; i < count; i++) { 6357 // TODO: Verify compute stage bits 6358 6359 // Create and initialize internal tracking data structure 6360 pPipeNode[i] = new PIPELINE_NODE; 6361 pPipeNode[i]->initComputePipeline(&pCreateInfos[i]); 6362 // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo)); 6363 6364 // TODO: Add Compute Pipeline Verification 6365 // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]); 6366 } 6367 6368 if (!skipCall) { 6369 lock.unlock(); 6370 result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, 6371 pPipelines); 6372 lock.lock(); 6373 for (i = 0; i < count; i++) { 6374 pPipeNode[i]->pipeline = pPipelines[i]; 6375 dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i]; 6376 } 6377 lock.unlock(); 6378 } else { 6379 for (i = 0; i < count; i++) { 6380 // Clean up any locally allocated data structures 6381 delete pPipeNode[i]; 6382 } 6383 lock.unlock(); 6384 return VK_ERROR_VALIDATION_FAILED_EXT; 6385 } 6386 return result; 6387} 6388 6389VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, 6390 const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) { 6391 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6392 VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler); 6393 if (VK_SUCCESS == result) { 6394 std::lock_guard<std::mutex> lock(global_lock); 6395 dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo)); 6396 } 6397 
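// The create hooks in this layer share one shape: call down the dispatch chain first, and
// only shadow state under global_lock once the driver reports VK_SUCCESS. A minimal sketch
// of that shape (FOO_NODE/fooMap/hook_vkCreateFoo are hypothetical names, not part of this file):
#if 0
VkResult hook_vkCreateFoo(VkDevice device, const VkFooCreateInfo *pCreateInfo, VkFoo *pFoo) {
    layer_data *dd = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult res = dd->device_dispatch_table->CreateFoo(device, pCreateInfo, nullptr, pFoo); // down the chain
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock); // shadow maps are guarded by global_lock
        dd->fooMap[*pFoo] = FOO_NODE(pCreateInfo);     // record create info for later validation
    }
    return res;
}
#endif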
return result; 6398} 6399 6400VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6401vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, 6402 const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) { 6403 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6404 VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout); 6405 if (VK_SUCCESS == result) { 6406 // TODOSC : Capture layout bindings set 6407 LAYOUT_NODE *pNewNode = new LAYOUT_NODE; 6408 if (NULL == pNewNode) { 6409 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 6410 (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", 6411 "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()")) 6412 return VK_ERROR_VALIDATION_FAILED_EXT; 6413 } 6414 memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo)); 6415 pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount]; 6416 memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings, 6417 sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount); 6418 // g++ does not like reserve with size 0 6419 if (pCreateInfo->bindingCount) 6420 pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount); 6421 uint32_t totalCount = 0; 6422 for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) { 6423 if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) { 6424 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6425 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__, 6426 DRAWSTATE_INVALID_LAYOUT, "DS", "duplicated binding number in " 6427 "VkDescriptorSetLayoutBinding")) 6428 return VK_ERROR_VALIDATION_FAILED_EXT; 6429 } else { 6430 pNewNode->bindingToIndexMap[pCreateInfo->pBindings[i].binding] = i; 6431 } 6432 totalCount += pCreateInfo->pBindings[i].descriptorCount; 6433 if (pCreateInfo->pBindings[i].pImmutableSamplers) { 6434 VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers; 6435 *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount]; 6436 memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers, 6437 pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler)); 6438 pNewNode->immutableSamplerCount += pCreateInfo->pBindings[i].descriptorCount; 6439 } 6440 } 6441 pNewNode->layout = *pSetLayout; 6442 pNewNode->startIndex = 0; 6443 if (totalCount > 0) { 6444 pNewNode->descriptorTypes.resize(totalCount); 6445 pNewNode->stageFlags.resize(totalCount); 6446 uint32_t offset = 0; 6447 uint32_t j = 0; 6448 VkDescriptorType dType; 6449 for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) { 6450 dType = pCreateInfo->pBindings[i].descriptorType; 6451 for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) { 6452 pNewNode->descriptorTypes[offset + j] = dType; 6453 pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags; 6454 if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || 6455 (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { 6456 pNewNode->dynamicDescriptorCount++; 6457 } 6458 } 6459 offset += j; 6460 } 6461 pNewNode->endIndex = pNewNode->startIndex + totalCount - 1; 6462 } else { // no descriptors 6463 pNewNode->endIndex = 0; 6464 } 6465 // Put new node at Head of global Layer 
list 6466 std::lock_guard<std::mutex> lock(global_lock); 6467 dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode; 6468 } 6469 return result; 6470} 6471 6472static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size, 6473 const char *caller_name) { 6474 bool skipCall = false; 6475 if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) { 6476 skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6477 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that " 6478 "exceed this device's maxPushConstantsSize of %u.", 6479 caller_name, offset, size, dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize); 6480 } 6481 return skipCall; 6482} 6483 6484VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, 6485 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) { 6486 bool skipCall = false; 6487 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6488 uint32_t i = 0; 6489 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 6490 skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset, 6491 pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()"); 6492 if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) { 6493 skipCall |= 6494 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6495 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant range %u with " 6496 "size %u. Size must be greater than zero and a multiple of 4.", 6497 i, pCreateInfo->pPushConstantRanges[i].size); 6498 } 6499 // TODO : Add warning if ranges overlap 6500 } 6501 VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout); 6502 if (VK_SUCCESS == result) { 6503 std::lock_guard<std::mutex> lock(global_lock); 6504 // TODOSC : Merge capture of the setLayouts per pipeline 6505 PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout]; 6506 plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount); 6507 for (i = 0; i < pCreateInfo->setLayoutCount; ++i) { 6508 plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i]; 6509 } 6510 plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount); 6511 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { 6512 plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i]; 6513 } 6514 } 6515 return result; 6516} 6517 6518VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6519vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, 6520 VkDescriptorPool *pDescriptorPool) { 6521 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6522 VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool); 6523 if (VK_SUCCESS == result) { 6524 // Log the new pool's creation, then add it to the global descriptor pool map below 6525 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 6526 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool %#" PRIxLEAST64, 6527 (uint64_t)*pDescriptorPool)) 6528 return VK_ERROR_VALIDATION_FAILED_EXT; 6529 DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo); 6530 if (NULL == pNewNode) { 6531 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 6532 (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", 6533 "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()")) 6534 return VK_ERROR_VALIDATION_FAILED_EXT; 6535 } else { 6536 std::lock_guard<std::mutex> lock(global_lock); 6537 dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode; 6538 } 6539 } else { 6540 // TODO : Is any cleanup needed if pool creation fails?
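// Note that only pools created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT may
// later free individual sets (validated in vkFreeDescriptorSets below). For reference, an
// application-side create that permits freeing looks like this (values illustrative):
#if 0
VkDescriptorPoolSize poolSize = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 16};
VkDescriptorPoolCreateInfo ci = {VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr,
                                 VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
                                 8 /* maxSets */, 1, &poolSize};
VkDescriptorPool pool = VK_NULL_HANDLE;
vkCreateDescriptorPool(device, &ci, nullptr, &pool);
#endif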
6541 } 6542 return result; 6543} 6544 6545VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6546vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) { 6547 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6548 VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags); 6549 if (VK_SUCCESS == result) { 6550 std::lock_guard<std::mutex> lock(global_lock); 6551 clearDescriptorPool(dev_data, device, descriptorPool, flags); 6552 } 6553 return result; 6554} 6555 6556VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6557vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) { 6558 bool skipCall = false; 6559 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6560 6561 std::unique_lock<std::mutex> lock(global_lock); 6562 // Verify that requested descriptorSets are available in pool 6563 DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool); 6564 if (!pPoolNode) { 6565 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, 6566 (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS", 6567 "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call", 6568 (uint64_t)pAllocateInfo->descriptorPool); 6569 } else { // Make sure pool has all the available descriptors before calling down chain 6570 skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount, 6571 pAllocateInfo->pSetLayouts); 6572 } 6573 lock.unlock(); 6574 if (skipCall) 6575 return VK_ERROR_VALIDATION_FAILED_EXT; 6576 VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets); 6577 if (VK_SUCCESS == result) { 6578 lock.lock(); 6579 DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool); 6580 if (pPoolNode) { 6581 if (pAllocateInfo->descriptorSetCount == 0) { 6582 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 6583 pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS", 6584 "AllocateDescriptorSets called with 0 count"); 6585 } 6586 for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) { 6587 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 6588 (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64, 6589 (uint64_t)pDescriptorSets[i]); 6590 // Create new set node and add to head of pool nodes 6591 SET_NODE *pNewNode = new SET_NODE; 6592 if (NULL == pNewNode) { 6593 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6594 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, 6595 DRAWSTATE_OUT_OF_MEMORY, "DS", 6596 "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) { 6597 lock.unlock(); 6598 return VK_ERROR_VALIDATION_FAILED_EXT; 6599 } 6600 } else { 6601 // TODO : Pool should store a total count of each type of Descriptor available 6602 // When descriptors are allocated, decrement the count and validate here 6603 // that the count doesn't go below 0. On reset/free, bump the count back up.
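// A sketch of that bookkeeping, using the per-type counts DESCRIPTOR_POOL_NODE already
// tracks (availableDescriptorTypeCount, indexed by descriptor type exactly as in
// vkFreeDescriptorSets below); the helper name is illustrative, not an existing function:
#if 0
bool takeFromPool(DESCRIPTOR_POOL_NODE *pPool, VkDescriptorType type, uint32_t count) {
    uint32_t &avail = pPool->availableDescriptorTypeCount[static_cast<uint32_t>(type)];
    if (avail < count)
        return false; // allocation would exceed what the pool advertised at creation
    avail -= count;   // decrement on allocate; vkFreeDescriptorSets adds it back
    return true;
}
#endif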
6604 // Insert set at head of Set LL for this pool 6605 pNewNode->pNext = pPoolNode->pSets; 6606 pNewNode->in_use.store(0); 6607 pPoolNode->pSets = pNewNode; 6608 LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]); 6609 if (NULL == pLayout) { 6610 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6611 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i], 6612 __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", 6613 "Unable to find set layout node for layout %#" PRIxLEAST64 6614 " specified in vkAllocateDescriptorSets() call", 6615 (uint64_t)pAllocateInfo->pSetLayouts[i])) { 6616 lock.unlock(); 6617 return VK_ERROR_VALIDATION_FAILED_EXT; 6618 } 6619 } 6620 pNewNode->pLayout = pLayout; 6621 pNewNode->pool = pAllocateInfo->descriptorPool; 6622 pNewNode->set = pDescriptorSets[i]; 6623 pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0; 6624 if (pNewNode->descriptorCount) { 6625 pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount); 6626 } 6627 dev_data->setMap[pDescriptorSets[i]] = pNewNode; 6628 } 6629 } 6630 } 6631 lock.unlock(); 6632 } 6633 return result; 6634} 6635 6636VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6637vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) { 6638 bool skipCall = false; 6639 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6640 // Make sure that no sets being destroyed are in-flight 6641 std::unique_lock<std::mutex> lock(global_lock); 6642 for (uint32_t i = 0; i < count; ++i) 6643 skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets"); 6644 DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool); 6645 if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) { 6646 // Can't Free from a NON_FREE pool 6647 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 6648 (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS", 6649 "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " 6650 "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT."); 6651 } 6652 lock.unlock(); 6653 if (skipCall) 6654 return VK_ERROR_VALIDATION_FAILED_EXT; 6655 VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets); 6656 if (VK_SUCCESS == result) { 6657 lock.lock(); 6658 if (pPoolNode) { // pool handle may have been unknown above; skip bookkeeping rather than dereference null 6659 // Update available descriptor sets in pool 6660 pPoolNode->availableSets += count; 6661 6662 // For each freed descriptor add it back into the pool as available 6663 for (uint32_t i = 0; i < count; ++i) { 6664 SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking 6665 invalidateBoundCmdBuffers(dev_data, pSet); 6666 LAYOUT_NODE *pLayout = pSet->pLayout; 6667 uint32_t typeIndex = 0, poolSizeCount = 0; 6668 for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) { 6669 typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType); 6670 poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount; 6671 pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount; 6672 } 6673 } 6674 } 6675 lock.unlock(); 6676 } 6677 // TODO : Any other clean-up or book-keeping to do here?
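// Freeing a set that is still referenced by a submitted command buffer is caught above by
// validateIdleDescriptorSet(); the application-side fix is to wait on the submit fence
// first. A sketch using only standard API calls (submitFence is assumed to be whatever
// fence was passed to vkQueueSubmit):
#if 0
vkWaitForFences(device, 1, &submitFence, VK_TRUE, UINT64_MAX);
vkFreeDescriptorSets(device, descriptorPool, 1, &set); // set is now idle, so the free is legal
#endif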
6677 return result; 6678} 6679 6680VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6681vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites, 6682 uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { 6683 // dsUpdate will return true only if a bailout error occurs, so we want to call down tree when update returns false 6684 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6685 std::unique_lock<std::mutex> lock(global_lock); 6686 bool rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); 6687 lock.unlock(); 6688 if (!rtn) { 6689 dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, 6690 pDescriptorCopies); 6691 } 6692} 6693 6694VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6695vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) { 6696 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 6697 VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer); 6698 if (VK_SUCCESS == result) { 6699 std::unique_lock<std::mutex> lock(global_lock); 6700 auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool); 6701 if (cp_it != dev_data->commandPoolMap.end()) { 6702 for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) { 6703 // Add command buffer to its commandPool map 6704 cp_it->second.commandBuffers.push_back(pCommandBuffer[i]); 6705 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE; 6706 // Add command buffer to map 6707 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB; 6708 resetCB(dev_data, pCommandBuffer[i]); 6709 pCB->createInfo = *pCreateInfo; 6710 pCB->device = device; 6711 } 6712 } 6713#if MTMERGESOURCE 6714 printCBList(dev_data); 6715#endif 6716 lock.unlock(); 6717 } 6718 return result; 6719} 6720 6721VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6722vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { 6723 bool skipCall = false; 6724 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6725 std::unique_lock<std::mutex> lock(global_lock); 6726 // Validate command buffer level 6727 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6728 if (pCB) { 6729 // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references 6730 if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) { 6731 skipCall |= 6732 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6733 (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM", 6734 "Calling vkBeginCommandBuffer() on active CB %p before it has completed. 
" 6735 "You must check CB fence before this call.", 6736 commandBuffer); 6737 } 6738 clear_cmd_buf_and_mem_references(dev_data, pCB); 6739 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { 6740 // Secondary Command Buffer 6741 const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo; 6742 if (!pInfo) { 6743 skipCall |= 6744 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6745 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6746 "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.", 6747 reinterpret_cast<void *>(commandBuffer)); 6748 } else { 6749 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { 6750 if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB 6751 skipCall |= log_msg( 6752 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6753 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6754 "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.", 6755 reinterpret_cast<void *>(commandBuffer)); 6756 } 6757 if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf 6758 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 6759 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6760 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, 6761 "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a " 6762 "valid framebuffer parameter is specified.", 6763 reinterpret_cast<void *>(commandBuffer)); 6764 } else { 6765 string errorString = ""; 6766 auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer); 6767 if (fbNode != dev_data->frameBufferMap.end()) { 6768 VkRenderPass fbRP = fbNode->second.createInfo.renderPass; 6769 if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) { 6770 // renderPass that framebuffer was created with must be compatible with local renderPass 6771 skipCall |= 6772 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6773 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6774 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, 6775 "DS", "vkBeginCommandBuffer(): Secondary Command " 6776 "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer " 6777 "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s", 6778 reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass), 6779 (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str()); 6780 } 6781 // Connect this framebuffer to this cmdBuffer 6782 fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer); 6783 } 6784 } 6785 } 6786 if ((pInfo->occlusionQueryEnable == VK_FALSE || 6787 dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) && 6788 (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) { 6789 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6790 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer), 6791 __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6792 "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have " 6793 "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not " 6794 
"support precise occlusion queries.", 6795 reinterpret_cast<void *>(commandBuffer)); 6796 } 6797 } 6798 if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) { 6799 auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass); 6800 if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) { 6801 if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) { 6802 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 6803 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__, 6804 DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6805 "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must has a subpass index (%d) " 6806 "that is less than the number of subpasses (%d).", 6807 (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount); 6808 } 6809 } 6810 } 6811 } 6812 if (CB_RECORDING == pCB->state) { 6813 skipCall |= 6814 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6815 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", 6816 "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64 6817 ") in the RECORDING state. Must first call vkEndCommandBuffer().", 6818 (uint64_t)commandBuffer); 6819 } else if (CB_RECORDED == pCB->state) { 6820 VkCommandPool cmdPool = pCB->createInfo.commandPool; 6821 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) { 6822 skipCall |= 6823 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6824 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 6825 "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64 6826 ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64 6827 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", 6828 (uint64_t)commandBuffer, (uint64_t)cmdPool); 6829 } 6830 resetCB(dev_data, commandBuffer); 6831 } 6832 // Set updated state here in case implicit reset occurs above 6833 pCB->state = CB_RECORDING; 6834 pCB->beginInfo = *pBeginInfo; 6835 if (pCB->beginInfo.pInheritanceInfo) { 6836 pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo); 6837 pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo; 6838 } 6839 } else { 6840 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6841 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", 6842 "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer); 6843 } 6844 lock.unlock(); 6845 if (skipCall) { 6846 return VK_ERROR_VALIDATION_FAILED_EXT; 6847 } 6848 VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo); 6849 6850 return result; 6851} 6852 6853VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) { 6854 bool skipCall = false; 6855 VkResult result = VK_SUCCESS; 6856 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6857 std::unique_lock<std::mutex> lock(global_lock); 6858 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6859 if (pCB) { 6860 if (pCB->state != CB_RECORDING) { 6861 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()"); 6862 } 6863 for (auto query : pCB->activeQueries) { 6864 
skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 6865 DRAWSTATE_INVALID_QUERY, "DS", 6866 "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d", 6867 (uint64_t)(query.pool), query.index); 6868 } 6869 } 6870 if (!skipCall) { 6871 lock.unlock(); 6872 result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer); 6873 lock.lock(); 6874 if (VK_SUCCESS == result) { 6875 pCB->state = CB_RECORDED; 6876 // Reset CB status flags 6877 pCB->status = 0; 6878 printCB(dev_data, commandBuffer); 6879 } 6880 } else { 6881 result = VK_ERROR_VALIDATION_FAILED_EXT; 6882 } 6883 lock.unlock(); 6884 return result; 6885} 6886 6887VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 6888vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) { 6889 bool skip_call = false; 6890 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6891 std::unique_lock<std::mutex> lock(global_lock); 6892 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6893 VkCommandPool cmdPool = pCB->createInfo.commandPool; 6894 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) { 6895 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 6896 (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS", 6897 "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64 6898 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", 6899 (uint64_t)commandBuffer, (uint64_t)cmdPool); 6900 } 6901 skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset"); 6902 lock.unlock(); 6903 if (skip_call) 6904 return VK_ERROR_VALIDATION_FAILED_EXT; 6905 VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags); 6906 if (VK_SUCCESS == result) { 6907 lock.lock(); 6908 resetCB(dev_data, commandBuffer); 6909 lock.unlock(); 6910 } 6911 return result; 6912} 6913 6914#if MTMERGESOURCE 6915// TODO : For any vkCmdBind* calls that include an object which has mem bound to it, 6916// need to account for that mem now having binding to given commandBuffer 6917#endif 6918VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6919vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) { 6920 bool skipCall = false; 6921 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6922 std::unique_lock<std::mutex> lock(global_lock); 6923 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6924 if (pCB) { 6925 skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()"); 6926 if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) { 6927 skipCall |= 6928 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 6929 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS", 6930 "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")", 6931 (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass); 6932 } 6933 6934 PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline); 6935 if (pPN) { 6936 pCB->lastBound[pipelineBindPoint].pipeline = pipeline; 6937 set_cb_pso_status(pCB, pPN); 6938 set_pipeline_state(pPN); 6939 skipCall |= 
validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline); 6940 } else { 6941 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, 6942 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS", 6943 "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline)); 6944 } 6945 } 6946 lock.unlock(); 6947 if (!skipCall) 6948 dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline); 6949} 6950 6951VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6952vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) { 6953 bool skipCall = false; 6954 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6955 std::unique_lock<std::mutex> lock(global_lock); 6956 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6957 if (pCB) { 6958 skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()"); 6959 pCB->status |= CBSTATUS_VIEWPORT_SET; 6960 pCB->viewports.resize(viewportCount); 6961 memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport)); 6962 } 6963 lock.unlock(); 6964 if (!skipCall) 6965 dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports); 6966} 6967 6968VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 6969vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) { 6970 bool skipCall = false; 6971 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6972 std::unique_lock<std::mutex> lock(global_lock); 6973 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6974 if (pCB) { 6975 skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()"); 6976 pCB->status |= CBSTATUS_SCISSOR_SET; 6977 pCB->scissors.resize(scissorCount); 6978 memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D)); 6979 } 6980 lock.unlock(); 6981 if (!skipCall) 6982 dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors); 6983} 6984 6985VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) { 6986 bool skip_call = false; 6987 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 6988 std::unique_lock<std::mutex> lock(global_lock); 6989 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 6990 if (pCB) { 6991 skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()"); 6992 pCB->status |= CBSTATUS_LINE_WIDTH_SET; 6993 6994 PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline); 6995 if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) { 6996 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 6997 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS", 6998 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH " 6999 "flag. 
This is undefined behavior and could be ignored."); 7000 } else { 7001 skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth); 7002 } 7003 } 7004 lock.unlock(); 7005 if (!skip_call) 7006 dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth); 7007} 7008 7009VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7010vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) { 7011 bool skipCall = false; 7012 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7013 std::unique_lock<std::mutex> lock(global_lock); 7014 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7015 if (pCB) { 7016 skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()"); 7017 pCB->status |= CBSTATUS_DEPTH_BIAS_SET; 7018 } 7019 lock.unlock(); 7020 if (!skipCall) 7021 dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, 7022 depthBiasSlopeFactor); 7023} 7024 7025VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) { 7026 bool skipCall = false; 7027 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7028 std::unique_lock<std::mutex> lock(global_lock); 7029 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7030 if (pCB) { 7031 skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()"); 7032 pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET; 7033 } 7034 lock.unlock(); 7035 if (!skipCall) 7036 dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants); 7037} 7038 7039VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7040vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) { 7041 bool skipCall = false; 7042 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7043 std::unique_lock<std::mutex> lock(global_lock); 7044 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7045 if (pCB) { 7046 skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()"); 7047 pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET; 7048 } 7049 lock.unlock(); 7050 if (!skipCall) 7051 dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds); 7052} 7053 7054VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7055vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) { 7056 bool skipCall = false; 7057 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7058 std::unique_lock<std::mutex> lock(global_lock); 7059 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7060 if (pCB) { 7061 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()"); 7062 pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET; 7063 } 7064 lock.unlock(); 7065 if (!skipCall) 7066 dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask); 7067} 7068 7069VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7070vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) { 7071 bool skipCall = false; 7072 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7073 std::unique_lock<std::mutex> lock(global_lock); 7074 
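// The dynamic-state hooks in this stretch all follow the same recipe: record the command
// with addCmd(), set a CBSTATUS_* bit so draw-time validation can confirm the state was
// actually set, then call down the chain. A sketch of how such a bit might be consumed at
// draw time (hypothetical helper names, not this file's exact functions):
#if 0
if (pipelineUsesDynamicState(pPipe, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK) &&
    !(pCB->status & CBSTATUS_STENCIL_WRITE_MASK_SET)) {
    // error: the pipeline expects vkCmdSetStencilWriteMask before this draw
}
#endif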
GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7075 if (pCB) { 7076 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()"); 7077 pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET; 7078 } 7079 lock.unlock(); 7080 if (!skipCall) 7081 dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask); 7082} 7083 7084VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7085vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) { 7086 bool skipCall = false; 7087 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7088 std::unique_lock<std::mutex> lock(global_lock); 7089 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7090 if (pCB) { 7091 skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()"); 7092 pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET; 7093 } 7094 lock.unlock(); 7095 if (!skipCall) 7096 dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference); 7097} 7098 7099VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7100vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, 7101 uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, 7102 const uint32_t *pDynamicOffsets) { 7103 bool skipCall = false; 7104 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7105 std::unique_lock<std::mutex> lock(global_lock); 7106 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7107 if (pCB) { 7108 if (pCB->state == CB_RECORDING) { 7109 // Track total count of dynamic descriptor types to make sure we have an offset for each one 7110 uint32_t totalDynamicDescriptors = 0; 7111 string errorString = ""; 7112 uint32_t lastSetIndex = firstSet + setCount - 1; 7113 if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) 7114 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1); 7115 VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex]; 7116 for (uint32_t i = 0; i < setCount; i++) { 7117 SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]); 7118 if (pSet) { 7119 pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]); 7120 pSet->boundCmdBuffers.insert(commandBuffer); 7121 pCB->lastBound[pipelineBindPoint].pipelineLayout = layout; 7122 pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i]; 7123 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 7124 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, 7125 DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s", 7126 (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint)); 7127 if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) { 7128 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, 7129 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], 7130 __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS", 7131 "DS %#" PRIxLEAST64 7132 " bound but it was never updated. 
You may want to either update it or not bind it.", 7133 (uint64_t)pDescriptorSets[i]); 7134 } 7135 // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout 7136 if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) { 7137 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7138 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], 7139 __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS", 7140 "descriptorSet #%u being bound is not compatible with overlapping layout in " 7141 "pipelineLayout due to: %s", 7142 i, errorString.c_str()); 7143 } 7144 if (pSet->pLayout->dynamicDescriptorCount) { 7145 // First make sure we won't overstep bounds of pDynamicOffsets array 7146 if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) { 7147 skipCall |= 7148 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7149 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, 7150 DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS", 7151 "descriptorSet #%u (%#" PRIxLEAST64 7152 ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets " 7153 "array. There must be one dynamic offset for each dynamic descriptor being bound.", 7154 i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount, 7155 (dynamicOffsetCount - totalDynamicDescriptors)); 7156 } else { // Validate and store dynamic offsets with the set 7157 // Validate Dynamic Offset Minimums 7158 uint32_t cur_dyn_offset = totalDynamicDescriptors; 7159 for (uint32_t d = 0; d < pSet->descriptorCount; d++) { 7160 if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) { 7161 if (vk_safe_modulo( 7162 pDynamicOffsets[cur_dyn_offset], 7163 dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) { 7164 skipCall |= log_msg( 7165 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7166 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 7167 DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS", 7168 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of " 7169 "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64, 7170 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset], 7171 dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment); 7172 } 7173 cur_dyn_offset++; 7174 } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { 7175 if (vk_safe_modulo( 7176 pDynamicOffsets[cur_dyn_offset], 7177 dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) { 7178 skipCall |= log_msg( 7179 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7180 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, 7181 DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS", 7182 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of " 7183 "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64, 7184 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset], 7185 dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment); 7186 } 7187 cur_dyn_offset++; 7188 } 7189 } 7190 // Keep running total of dynamic descriptor count to verify at the end 7191 totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount; 7192 } 7193 } 7194 } else { 7195 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7196 
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, 7197 DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!", 7198 (uint64_t)pDescriptorSets[i]); 7199 } 7200 skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()"); 7201 // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update 7202 if (firstSet > 0) { // Check set #s below the first bound set 7203 for (uint32_t i = 0; i < firstSet; ++i) { 7204 if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] && 7205 !verify_set_layout_compatibility( 7206 dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i, 7207 errorString)) { 7208 skipCall |= log_msg( 7209 dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 7210 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, 7211 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", 7212 "DescriptorSet %#" PRIxLEAST64 7213 " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")", 7214 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout); 7215 pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE; 7216 } 7217 } 7218 } 7219 // Check if newly last bound set invalidates any remaining bound sets 7220 if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) { 7221 if (oldFinalBoundSet && 7222 !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex, 7223 errorString)) { 7224 skipCall |= 7225 log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 7226 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__, 7227 DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64 7228 " previously bound as set #%u is incompatible with set %#" PRIxLEAST64 7229 " newly bound as set #%u so set #%u and any subsequent sets were " 7230 "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")", 7231 (uint64_t)oldFinalBoundSet, lastSetIndex, 7232 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex, 7233 lastSetIndex + 1, (uint64_t)layout); 7234 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1); 7235 } 7236 } 7237 } 7238 // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound 7239 if (totalDynamicDescriptors != dynamicOffsetCount) { 7240 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 7241 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__, 7242 DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS", 7243 "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount " 7244 "is %u. 
It should exactly match the number of dynamic descriptors.", 7245 setCount, totalDynamicDescriptors, dynamicOffsetCount); 7246 } 7247 // Save dynamicOffsets bound to this CB 7248 for (uint32_t i = 0; i < dynamicOffsetCount; i++) { 7249 pCB->lastBound[pipelineBindPoint].dynamicOffsets.emplace_back(pDynamicOffsets[i]); 7250 } 7251 } else { 7252 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()"); 7253 } 7254 } 7255 lock.unlock(); 7256 if (!skipCall) 7257 dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount, 7258 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets); 7259} 7260 7261VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 7262vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) { 7263 bool skipCall = false; 7264 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7265 std::unique_lock<std::mutex> lock(global_lock); 7266#if MTMERGESOURCE 7267 VkDeviceMemory mem; 7268 skipCall = 7269 get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7270 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7271 if (cb_data != dev_data->commandBufferMap.end()) { 7272 std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); }; 7273 cb_data->second->validate_functions.push_back(function); 7274 } 7275 // TODO : Somewhere need to verify that IBs have correct usage state flagged 7276#endif 7277 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7278 if (pCB) { 7279 skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()"); 7280 VkDeviceSize offset_align = 0; 7281 switch (indexType) { 7282 case VK_INDEX_TYPE_UINT16: 7283 offset_align = 2; 7284 break; 7285 case VK_INDEX_TYPE_UINT32: 7286 offset_align = 4; 7287 break; 7288 default: 7289 // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0 7290 break; 7291 } 7292 if (!offset_align || (offset % offset_align)) { 7293 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 7294 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS", 7295 "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", 7296 offset, string_VkIndexType(indexType)); 7297 } 7298 pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND; 7299 } 7300 lock.unlock(); 7301 if (!skipCall) 7302 dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType); 7303} 7304 7305void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) { 7306 uint32_t end = firstBinding + bindingCount; 7307 if (pCB->currentDrawData.buffers.size() < end) { 7308 pCB->currentDrawData.buffers.resize(end); 7309 } 7310 for (uint32_t i = 0; i < bindingCount; ++i) { 7311 pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i]; 7312 } 7313} 7314 7315static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); } 7316 7317VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, 7318 uint32_t bindingCount, const VkBuffer *pBuffers, 7319 const VkDeviceSize *pOffsets) { 7320 bool skipCall = false; 7321 layer_data *dev_data = 
get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7322 std::unique_lock<std::mutex> lock(global_lock); 7323#if MTMERGESOURCE 7324 for (uint32_t i = 0; i < bindingCount; ++i) { 7325 VkDeviceMemory mem; 7326 skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7327 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 7328 if (cb_data != dev_data->commandBufferMap.end()) { 7329 std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); }; 7330 cb_data->second->validate_functions.push_back(function); 7331 } 7332 } 7333 // TODO : Somewhere need to verify that VBs have correct usage state flagged 7334#endif 7335 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7336 if (pCB) { 7337 skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()"); 7338 updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers); 7339 } else { 7340 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()"); 7341 } 7342 lock.unlock(); 7343 if (!skipCall) 7344 dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets); 7345} 7346 7347/* expects global_lock to be held by caller */ 7348static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { 7349 bool skip_call = false; 7350 7351 for (auto imageView : pCB->updateImages) { 7352 auto iv_data = dev_data->imageViewMap.find(imageView); 7353 if (iv_data == dev_data->imageViewMap.end()) 7354 continue; 7355 VkImage image = iv_data->second.image; 7356 VkDeviceMemory mem; 7357 skip_call |= 7358 get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem); 7359 std::function<bool()> function = [=]() { 7360 set_memory_valid(dev_data, mem, true, image); 7361 return false; 7362 }; 7363 pCB->validate_functions.push_back(function); 7364 } 7365 for (auto buffer : pCB->updateBuffers) { 7366 VkDeviceMemory mem; 7367 skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer, 7368 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 7369 std::function<bool()> function = [=]() { 7370 set_memory_valid(dev_data, mem, true); 7371 return false; 7372 }; 7373 pCB->validate_functions.push_back(function); 7374 } 7375 return skip_call; 7376} 7377 7378VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, 7379 uint32_t firstVertex, uint32_t firstInstance) { 7380 bool skipCall = false; 7381 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 7382 std::unique_lock<std::mutex> lock(global_lock); 7383 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 7384 if (pCB) { 7385 skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()"); 7386 pCB->drawCount[DRAW]++; 7387 skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS); 7388 skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB); 7389 // TODO : Need to pass commandBuffer as srcObj here 7390 skipCall |= 7391 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 7392 __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++); 7393 skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer); 7394 if (!skipCall) { 7395
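// Snapshot the vertex buffers bound at this draw: updateResourceTracking() (above) keeps
// currentDrawData in sync with vkCmdBindVertexBuffers, and the call below freezes a copy
// into pCB->drawData, so submit-time checks know which buffers every recorded draw consumed.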

/* expects global_lock to be held by caller */
static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;

    for (auto imageView : pCB->updateImages) {
        auto iv_data = dev_data->imageViewMap.find(imageView);
        if (iv_data == dev_data->imageViewMap.end())
            continue;
        VkImage image = iv_data->second.image;
        VkDeviceMemory mem;
        skip_call |=
            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        VkDeviceMemory mem;
        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                                     uint32_t firstVertex, uint32_t firstInstance) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        pCB->drawCount[DRAW]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                                            uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
        pCB->drawCount[DRAW_INDEXED]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
                                                        firstInstance);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    // MTMTODO : merge with code below
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
        pCB->drawCount[DRAW_INDIRECT]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    // MTMTODO : merge with code below
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
        // skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
        // TODO : Call below is temporary until call above can be re-enabled
        update_shader_storage_images_and_buffers(dev_data, pCB);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
        // skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
        // TODO : Call below is temporary until call above can be re-enabled
        update_shader_storage_images_and_buffers(dev_data, pCB);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
}
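
// Editor's note on naming: insideRenderPass() and outsideRenderPass() each report the *error*
// case their name describes -- insideRenderPass() flags commands that must NOT be recorded
// inside a render pass (the dispatches above and the transfer commands below), while
// outsideRenderPass() flags commands that must only appear inside one (the draw commands
// and vkCmdClearAttachments).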

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
    // Validate that SRC & DST buffers have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
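
// Editor's note (hypothetical app code): the usage-flag checks above fire when a buffer is
// created without the matching transfer bit, e.g.
//     VkBufferCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     ci.size = 256;
//     ci.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; // missing VK_BUFFER_USAGE_TRANSFER_SRC_BIT
// and that buffer is then passed to vkCmdCopyBuffer() as srcBuffer.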

static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
                                    VkImageLayout srcImageLayout) {
    bool skip_call = false;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, srcImage, sub, node)) {
            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                                                                        "and doesn't match the current layout %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(srcImageLayout));
        }
    }
    return skip_call;
}

static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
                                  VkImageLayout destImageLayout) {
    bool skip_call = false;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, destImage, sub, node)) {
            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s and "
                                                                        "doesn't match the current layout %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(destImageLayout));
        }
    }
    return skip_call;
}
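
// Editor's note: in both Verify*ImageLayout() helpers above, a subresource with no layout on
// record is taken at the caller's word -- SetLayout() stores the claimed layout as both the
// initial and current value -- so later commands in this command buffer are checked against it.
// Only a mismatch with a layout this command buffer has already seen produces an error here.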

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    // Validate that src & dst images have correct usage flags set
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    // Validate that src & dst images have correct usage flags set
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions, filter);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    // Validate that src buff & dst image have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage);
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    // Validate that dst buff & src image have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}
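
// Editor's note: the MTMERGESOURCE blocks in the copy entry points above deliberately reuse a
// single `mem` local -- it is first resolved for the read-side object (queued through
// validate_memory_is_valid) and then re-resolved for the write-side object (queued through
// set_memory_valid) -- so the order of the two get_mem_binding_from_object() calls is significant.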

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize,
                                                             const uint32_t *pData) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
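
// Editor's note: rather than checking memory contents at record time, the entry points above push
// std::function callbacks onto the command buffer's validate_functions list; these run when the
// command buffer is later validated for execution, once the actual valid/invalid state of the
// backing memory is known (writes enqueue set_memory_valid, reads enqueue validate_memory_is_valid).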

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
                                                                 const VkClearRect *pRects) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // TODO : commandBuffer should be srcObj
            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
            // call CmdClearAttachments
            // Otherwise this seems more like a performance warning.
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                (uint64_t)(commandBuffer));
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];

        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                bool found = false;
                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
                        attachment->colorAttachment, pCB->activeSubpass);
                }
            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
                    (pSD->pDepthStencilAttachment->attachment ==
                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass

                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
                        "in active subpass %d",
                        attachment->colorAttachment,
                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
                        pCB->activeSubpass);
                }
            }
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
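
// Editor's note (hypothetical): the performance warning above steers apps toward declaring the
// clear in the render pass itself, e.g.
//     VkAttachmentDescription att = {};
//     att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; // clear folded into the render-pass load
// instead of beginning the pass and immediately calling vkCmdClearAttachments() on the full area.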

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                            const VkImageSubresourceRange *pRanges) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
                                                                   pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    VkDeviceMemory mem;
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}

static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                                   const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    bool skip = false;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub,
                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
                                                                                    "when current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}

// Print readable FlagBits in FlagMask
static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (uint32_t i = 0; i < 32; i++) {
            if (accessMask & (1u << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}
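
// Editor's note (illustrative): string_VkAccessFlags(0) yields "[None]", while
// string_VkAccessFlags(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT) yields
// "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]".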

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
                             const char *type) {
    bool skip_call = false;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
                                                                  "%s when layout is %s, unless the app has previously added a "
                                                                  "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
                                                                  "layout is %s, unless the app has previously added a barrier for "
                                                                  "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}
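
// Editor's note (illustrative): for VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL the caller below
// passes required_bit = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT and optional_bits =
// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, so an accessMask of WRITE, or of WRITE | READ, passes
// silently, while an accessMask of only VK_ACCESS_SHADER_READ_BIT draws the
// "must have required access bit" warning from the else branch above.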

static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                        const VkImageLayout &layout, const char *type) {
    bool skip_call = false;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}

static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = dev_data->imageMap.find(mem_barrier->image);
        if (image_data != dev_data->imageMap.end()) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than the number of "
                                         "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
            }
            auto image_data = dev_data->imageMap.find(mem_barrier->image);
            VkFormat format = VK_FORMAT_UNDEFINED;
            uint32_t arrayLayers = 0, mipLevels = 0;
            bool imageFound = false;
            if (image_data != dev_data->imageMap.end()) {
                format = image_data->second.createInfo.format;
                arrayLayers = image_data->second.createInfo.arrayLayers;
                mipLevels = image_data->second.createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
                        format = swapchain_data->second->createInfo.imageFormat;
                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                if (vk_format_is_depth_and_stencil(format) &&
                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Image is a depth and stencil format and thus must "
                                         "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
                                         "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                         funcName);
                }
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource must have the sum of the "
                                         "baseArrayLayer (%d) and layerCount (%d) be less "
                                         "than or equal to the total number of layers (%d).",
                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource must have the sum of the baseMipLevel "
                                         "(%d) and levelCount (%d) be less than or equal to "
                                         "the total number of levels (%d).",
                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
                                         mem_barrier->subresourceRange.levelCount, mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                 dev_data->phys_dev_properties.queue_family_properties.size());
        }

        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
        if (buffer_data != dev_data->bufferMap.end()) {
            VkDeviceSize buffer_size = (buffer_data->second.createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
                                           ? buffer_data->second.createInfo.size
                                           : 0;
            if (mem_barrier->offset >= buffer_size) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS",
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " which is not less than total size %" PRIu64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
                                                     " whose sum is greater than total size %" PRIu64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset),
                    reinterpret_cast<const uint64_t &>(mem_barrier->size),
                    reinterpret_cast<const uint64_t &>(buffer_size));
            }
        }
    }
    return skip_call;
}

bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = dev_data->eventMap.find(event);
            if (global_event_data == dev_data->eventMap.end()) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->second.stageMask;
            }
        }
    }
    if (sourceStageMask != stageMask) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_EVENT, "DS",
                    "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
                    sourceStageMask);
    }
    return skip_call;
}
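
// Editor's note (illustrative): validateEventStageMask() requires srcStageMask to equal the OR of
// the stage masks the events were set with. E.g. after recording
//     vkCmdSetEvent(cb, evtA, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdSetEvent(cb, evtB, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
// a vkCmdWaitEvents() on both events must pass exactly
// VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT as sourceStageMask.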

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        auto firstEventIndex = pCB->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            pCB->waitedEvents.push_back(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8500 std::unique_lock<std::mutex> lock(global_lock); 8501 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8502 if (pCB) { 8503 auto firstEventIndex = pCB->events.size(); 8504 for (uint32_t i = 0; i < eventCount; ++i) { 8505 pCB->waitedEvents.push_back(pEvents[i]); 8506 pCB->events.push_back(pEvents[i]); 8507 } 8508 std::function<bool(VkQueue)> eventUpdate = 8509 std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask); 8510 pCB->eventUpdates.push_back(eventUpdate); 8511 if (pCB->state == CB_RECORDING) { 8512 skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()"); 8513 } else { 8514 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()"); 8515 } 8516 skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers); 8517 skipCall |= 8518 ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, 8519 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); 8520 } 8521 lock.unlock(); 8522 if (!skipCall) 8523 dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, 8524 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, 8525 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); 8526} 8527 8528VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8529vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, 8530 VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, 8531 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, 8532 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { 8533 bool skipCall = false; 8534 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8535 std::unique_lock<std::mutex> lock(global_lock); 8536 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8537 if (pCB) { 8538 skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()"); 8539 skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers); 8540 skipCall |= 8541 ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, 8542 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); 8543 } 8544 lock.unlock(); 8545 if (!skipCall) 8546 dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, 8547 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, 8548 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); 8549} 8550 8551VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8552vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) { 8553 bool skipCall = false; 8554 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8555 std::unique_lock<std::mutex> lock(global_lock); 8556 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8557 if (pCB) { 8558 QueryObject query = {queryPool, slot}; 8559 pCB->activeQueries.insert(query); 8560 if (!pCB->startedQueries.count(query)) { 8561 pCB->startedQueries.insert(query); 8562 } 8563 skipCall |= addCmd(dev_data, pCB, 
CMD_BEGINQUERY, "vkCmdBeginQuery()"); 8564 } 8565 lock.unlock(); 8566 if (!skipCall) 8567 dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags); 8568} 8569 8570VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { 8571 bool skipCall = false; 8572 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8573 std::unique_lock<std::mutex> lock(global_lock); 8574 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8575 if (pCB) { 8576 QueryObject query = {queryPool, slot}; 8577 if (!pCB->activeQueries.count(query)) { 8578 skipCall |= 8579 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8580 DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d", 8581 (uint64_t)(queryPool), slot); 8582 } else { 8583 pCB->activeQueries.erase(query); 8584 } 8585 pCB->queryToStateMap[query] = 1; 8586 if (pCB->state == CB_RECORDING) { 8587 skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "VkCmdEndQuery()"); 8588 } else { 8589 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()"); 8590 } 8591 } 8592 lock.unlock(); 8593 if (!skipCall) 8594 dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot); 8595} 8596 8597VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8598vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { 8599 bool skipCall = false; 8600 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8601 std::unique_lock<std::mutex> lock(global_lock); 8602 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8603 if (pCB) { 8604 for (uint32_t i = 0; i < queryCount; i++) { 8605 QueryObject query = {queryPool, firstQuery + i}; 8606 pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents; 8607 pCB->queryToStateMap[query] = 0; 8608 } 8609 if (pCB->state == CB_RECORDING) { 8610 skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()"); 8611 } else { 8612 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()"); 8613 } 8614 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdQueryPool"); 8615 } 8616 lock.unlock(); 8617 if (!skipCall) 8618 dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount); 8619} 8620 8621VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8622vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, 8623 VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { 8624 bool skipCall = false; 8625 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8626 std::unique_lock<std::mutex> lock(global_lock); 8627 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8628#if MTMERGESOURCE 8629 VkDeviceMemory mem; 8630 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 8631 skipCall |= 8632 get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem); 8633 if (cb_data != dev_data->commandBufferMap.end()) { 8634 std::function<bool()> function = [=]() { 8635 set_memory_valid(dev_data, mem, true); 8636 return false; 8637 }; 8638 cb_data->second->validate_functions.push_back(function); 8639 } 8640 skipCall |= 
update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults"); 8641 // Validate that DST buffer has correct usage flags set 8642 skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, 8643 "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); 8644#endif 8645 if (pCB) { 8646 for (uint32_t i = 0; i < queryCount; i++) { 8647 QueryObject query = {queryPool, firstQuery + i}; 8648 if (!pCB->queryToStateMap[query]) { 8649 skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 8650 __LINE__, DRAWSTATE_INVALID_QUERY, "DS", 8651 "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d", 8652 (uint64_t)(queryPool), firstQuery + i); 8653 } 8654 } 8655 if (pCB->state == CB_RECORDING) { 8656 skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()"); 8657 } else { 8658 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()"); 8659 } 8660 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults"); 8661 } 8662 lock.unlock(); 8663 if (!skipCall) 8664 dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, 8665 dstOffset, stride, flags); 8666} 8667 8668VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, 8669 VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, 8670 const void *pValues) { 8671 bool skipCall = false; 8672 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8673 std::unique_lock<std::mutex> lock(global_lock); 8674 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8675 if (pCB) { 8676 if (pCB->state == CB_RECORDING) { 8677 skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()"); 8678 } else { 8679 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()"); 8680 } 8681 } 8682 if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) { 8683 skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()"); 8684 } 8685 // TODO : Add warning if push constant update doesn't align with range 8686 lock.unlock(); 8687 if (!skipCall) 8688 dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues); 8689} 8690 8691VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 8692vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) { 8693 bool skipCall = false; 8694 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 8695 std::unique_lock<std::mutex> lock(global_lock); 8696 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 8697 if (pCB) { 8698 QueryObject query = {queryPool, slot}; 8699 pCB->queryToStateMap[query] = 1; 8700 if (pCB->state == CB_RECORDING) { 8701 skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()"); 8702 } else { 8703 skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()"); 8704 } 8705 } 8706 lock.unlock(); 8707 if (!skipCall) 8708 dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot); 8709} 8710 8711VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const 
VkFramebufferCreateInfo *pCreateInfo, 8712 const VkAllocationCallbacks *pAllocator, 8713 VkFramebuffer *pFramebuffer) { 8714 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 8715 VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer); 8716 if (VK_SUCCESS == result) { 8717 // Shadow create info and store in map 8718 std::lock_guard<std::mutex> lock(global_lock); 8719 8720 auto &fbNode = dev_data->frameBufferMap[*pFramebuffer]; 8721 fbNode.createInfo = *pCreateInfo; 8722 if (pCreateInfo->pAttachments) { 8723 auto attachments = new VkImageView[pCreateInfo->attachmentCount]; 8724 memcpy(attachments, 8725 pCreateInfo->pAttachments, 8726 pCreateInfo->attachmentCount * sizeof(VkImageView)); 8727 fbNode.createInfo.pAttachments = attachments; 8728 } 8729 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { 8730 VkImageView view = pCreateInfo->pAttachments[i]; 8731 auto view_data = dev_data->imageViewMap.find(view); 8732 if (view_data == dev_data->imageViewMap.end()) { 8733 continue; 8734 } 8735 MT_FB_ATTACHMENT_INFO fb_info; 8736 get_mem_binding_from_object(dev_data, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 8737 &fb_info.mem); 8738 fb_info.image = view_data->second.image; 8739 fbNode.attachments.push_back(fb_info); 8740 } 8741 } 8742 return result; 8743} 8744 8745static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node, 8746 std::unordered_set<uint32_t> &processed_nodes) { 8747 // If we have already checked this node, we have not found a dependency path, so return false. 8748 if (processed_nodes.count(index)) 8749 return false; 8750 processed_nodes.insert(index); 8751 const DAGNode &node = subpass_to_node[index]; 8752 // Look for a dependency path. If one exists, return true; else recurse on the previous nodes. 8753 if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) { 8754 for (auto elem : node.prev) { 8755 if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) 8756 return true; 8757 } 8758 } else { 8759 return true; 8760 } 8761 return false; 8762} 8763 8764static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses, 8765 const std::vector<DAGNode> &subpass_to_node, bool &skip_call) { 8766 bool result = true; 8767 // Loop through all subpasses that share the same attachment and make sure a dependency exists 8768 for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) { 8769 if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) 8770 continue; 8771 const DAGNode &node = subpass_to_node[subpass]; 8772 // Check for a specified dependency between the two nodes. If one exists we are done. 8773 auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]); 8774 auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]); 8775 if (prev_elem == node.prev.end() && next_elem == node.next.end()) { 8776 // If no explicit dependency exists, an implicit one still might. If so, warn; if not, report an error.
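// FindDependency() is run in both directions: a transitive chain of explicit dependencies
// through intermediate subpasses, in either order, satisfies the requirement. Only when
// neither direction yields a path is a hard error emitted below.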
8777 std::unordered_set<uint32_t> processed_nodes; 8778 if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) || 8779 FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) { 8780 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 8781 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 8782 "A dependency between subpasses %d and %d must exist but only an implicit one is specified.", 8783 subpass, dependent_subpasses[k]); 8784 } else { 8785 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 8786 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 8787 "A dependency between subpasses %d and %d must exist but one is not specified.", subpass, 8788 dependent_subpasses[k]); 8789 result = false; 8790 } 8791 } 8792 } 8793 return result; 8794} 8795 8796static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index, 8797 const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) { 8798 const DAGNode &node = subpass_to_node[index]; 8799 // If this node writes to the attachment, return true, as later nodes need to preserve the attachment. 8800 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index]; 8801 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 8802 if (attachment == subpass.pColorAttachments[j].attachment) 8803 return true; 8804 } 8805 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 8806 if (attachment == subpass.pDepthStencilAttachment->attachment) 8807 return true; 8808 } 8809 bool result = false; 8810 // Loop through previous nodes and see if any of them write to the attachment. 8811 for (auto elem : node.prev) { 8812 result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call); 8813 } 8814 // If the attachment was written to by a previous node, then this node needs to preserve it.
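// depth == 0 is the subpass that reads the attachment, which does not itself need to
// preserve it; the subpass that writes it returns early above. Only the intermediate
// subpasses between writer and reader (depth > 0) must list the attachment in
// pPreserveAttachments, which is what is checked here.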
8815 if (result && depth > 0) { 8816 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index]; 8817 bool has_preserved = false; 8818 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { 8819 if (subpass.pPreserveAttachments[j] == attachment) { 8820 has_preserved = true; 8821 break; 8822 } 8823 } 8824 if (!has_preserved) { 8825 skip_call |= 8826 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8827 DRAWSTATE_INVALID_RENDERPASS, "DS", 8828 "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index); 8829 } 8830 } 8831 return result; 8832} 8833 8834template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) { 8835 return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) || 8836 ((offset1 > offset2) && (offset1 < (offset2 + size2))); 8837} 8838 8839bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) { 8840 return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) && 8841 isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount)); 8842} 8843 8844static bool ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin, 8845 const std::vector<DAGNode> &subpass_to_node) { 8846 bool skip_call = false; 8847 const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo; 8848 const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo; 8849 std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount); 8850 std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount); 8851 std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount); 8852 // Find overlapping attachments 8853 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { 8854 for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) { 8855 VkImageView viewi = pFramebufferInfo->pAttachments[i]; 8856 VkImageView viewj = pFramebufferInfo->pAttachments[j]; 8857 if (viewi == viewj) { 8858 overlapping_attachments[i].push_back(j); 8859 overlapping_attachments[j].push_back(i); 8860 continue; 8861 } 8862 auto view_data_i = my_data->imageViewMap.find(viewi); 8863 auto view_data_j = my_data->imageViewMap.find(viewj); 8864 if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) { 8865 continue; 8866 } 8867 if (view_data_i->second.image == view_data_j->second.image && 8868 isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) { 8869 overlapping_attachments[i].push_back(j); 8870 overlapping_attachments[j].push_back(i); 8871 continue; 8872 } 8873 auto image_data_i = my_data->imageMap.find(view_data_i->second.image); 8874 auto image_data_j = my_data->imageMap.find(view_data_j->second.image); 8875 if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) { 8876 continue; 8877 } 8878 if (image_data_i->second.mem == image_data_j->second.mem && 8879 isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset, 8880 image_data_j->second.memSize)) { 8881 overlapping_attachments[i].push_back(j); 8882 overlapping_attachments[j].push_back(i); 8883 } 8884 } 8885 } 8886 for 
(uint32_t i = 0; i < overlapping_attachments.size(); ++i) { 8887 uint32_t attachment = i; 8888 for (auto other_attachment : overlapping_attachments[i]) { 8889 if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) { 8890 skip_call |= 8891 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8892 DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't " 8893 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.", 8894 attachment, other_attachment); 8895 } 8896 if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) { 8897 skip_call |= 8898 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8899 DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't " 8900 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.", 8901 other_attachment, attachment); 8902 } 8903 } 8904 } 8905 // Find for each attachment the subpasses that use them. 8906 unordered_set<uint32_t> attachmentIndices; 8907 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8908 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8909 attachmentIndices.clear(); 8910 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 8911 uint32_t attachment = subpass.pInputAttachments[j].attachment; 8912 input_attachment_to_subpass[attachment].push_back(i); 8913 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 8914 input_attachment_to_subpass[overlapping_attachment].push_back(i); 8915 } 8916 } 8917 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 8918 uint32_t attachment = subpass.pColorAttachments[j].attachment; 8919 output_attachment_to_subpass[attachment].push_back(i); 8920 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 8921 output_attachment_to_subpass[overlapping_attachment].push_back(i); 8922 } 8923 attachmentIndices.insert(attachment); 8924 } 8925 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 8926 uint32_t attachment = subpass.pDepthStencilAttachment->attachment; 8927 output_attachment_to_subpass[attachment].push_back(i); 8928 for (auto overlapping_attachment : overlapping_attachments[attachment]) { 8929 output_attachment_to_subpass[overlapping_attachment].push_back(i); 8930 } 8931 8932 if (attachmentIndices.count(attachment)) { 8933 skip_call |= 8934 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 8935 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 8936 "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", 8937 attachment, i); 8938 } 8939 } 8940 } 8941 // If there is a dependency needed make sure one exists 8942 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8943 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8944 // If the attachment is an input then all subpasses that output must have a dependency relationship 8945 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 8946 const uint32_t &attachment = subpass.pInputAttachments[j].attachment; 8947 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8948 } 8949 // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship 8950 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 
8951 const uint32_t &attachment = subpass.pColorAttachments[j].attachment; 8952 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8953 CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8954 } 8955 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 8956 const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment; 8957 CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8958 CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call); 8959 } 8960 } 8961 // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was 8962 // written. 8963 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8964 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8965 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 8966 CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call); 8967 } 8968 } 8969 return skip_call; 8970} 8971 8972static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) { 8973 bool skip = false; 8974 8975 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 8976 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 8977 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 8978 if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL && 8979 subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { 8980 if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) { 8981 // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance 8982 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 8983 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 8984 "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL."); 8985 } else { 8986 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 8987 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 8988 "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.", 8989 string_VkImageLayout(subpass.pInputAttachments[j].layout)); 8990 } 8991 } 8992 } 8993 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 8994 if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { 8995 if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) { 8996 // TODO: Verify Valid Use in spec. 
I believe this is allowed (valid) but may not be optimal performance 8997 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 8998 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 8999 "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL."); 9000 } else { 9001 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9002 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 9003 "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.", 9004 string_VkImageLayout(subpass.pColorAttachments[j].layout)); 9005 } 9006 } 9007 } 9008 if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) { 9009 if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { 9010 if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) { 9011 // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance 9012 skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, 9013 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 9014 "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL."); 9015 } else { 9016 skip |= 9017 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9018 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", 9019 "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.", 9020 string_VkImageLayout(subpass.pDepthStencilAttachment->layout)); 9021 } 9022 } 9023 } 9024 } 9025 return skip; 9026} 9027 9028static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, 9029 std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) { 9030 bool skip_call = false; 9031 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 9032 DAGNode &subpass_node = subpass_to_node[i]; 9033 subpass_node.pass = i; 9034 } 9035 for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { 9036 const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i]; 9037 if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL && 9038 dependency.dstSubpass != VK_SUBPASS_EXTERNAL) { 9039 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9040 DRAWSTATE_INVALID_RENDERPASS, "DS", 9041 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass."); 9042 } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) { 9043 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9044 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external."); 9045 } else if (dependency.srcSubpass == dependency.dstSubpass) { 9046 has_self_dependency[dependency.srcSubpass] = true; 9047 } 9048 if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) { 9049 subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass); 9050 } 9051 if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) { 9052 subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass); 9053 } 9054 } 9055 return skip_call; 9056} 9057 9058 9059VK_LAYER_EXPORT
VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo, 9060 const VkAllocationCallbacks *pAllocator, 9061 VkShaderModule *pShaderModule) { 9062 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9063 bool skip_call = false; 9064 if (!shader_is_spirv(pCreateInfo)) { 9065 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 9066 /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V"); 9067 } 9068 9069 if (skip_call) 9070 return VK_ERROR_VALIDATION_FAILED_EXT; 9071 9072 VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule); 9073 9074 if (res == VK_SUCCESS) { 9075 std::lock_guard<std::mutex> lock(global_lock); 9076 my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo)); 9077 } 9078 return res; 9079} 9080 9081VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, 9082 const VkAllocationCallbacks *pAllocator, 9083 VkRenderPass *pRenderPass) { 9084 bool skip_call = false; 9085 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9086 std::unique_lock<std::mutex> lock(global_lock); 9087 // Create DAG 9088 std::vector<bool> has_self_dependency(pCreateInfo->subpassCount); 9089 std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount); 9090 skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency); 9091 // Validate 9092 skip_call |= ValidateLayouts(dev_data, device, pCreateInfo); 9093 if (skip_call) { 9094 lock.unlock(); 9095 return VK_ERROR_VALIDATION_FAILED_EXT; 9096 } 9097 lock.unlock(); 9098 VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); 9099 if (VK_SUCCESS == result) { 9100 lock.lock(); 9101 // TODOSC : Merge in tracking of renderpass from shader_checker 9102 // Shadow create info and store in map 9103 VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo); 9104 if (pCreateInfo->pAttachments) { 9105 localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount]; 9106 memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments, 9107 localRPCI->attachmentCount * sizeof(VkAttachmentDescription)); 9108 } 9109 if (pCreateInfo->pSubpasses) { 9110 localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount]; 9111 memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription)); 9112 9113 for (uint32_t i = 0; i < localRPCI->subpassCount; i++) { 9114 VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i]; 9115 const uint32_t attachmentCount = subpass->inputAttachmentCount + 9116 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) + 9117 ((subpass->pDepthStencilAttachment) ? 
1 : 0) + subpass->preserveAttachmentCount; 9118 VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount]; 9119 9120 memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount); 9121 subpass->pInputAttachments = attachments; 9122 attachments += subpass->inputAttachmentCount; 9123 9124 memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount); 9125 subpass->pColorAttachments = attachments; 9126 attachments += subpass->colorAttachmentCount; 9127 9128 if (subpass->pResolveAttachments) { 9129 memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount); 9130 subpass->pResolveAttachments = attachments; 9131 attachments += subpass->colorAttachmentCount; 9132 } 9133 9134 if (subpass->pDepthStencilAttachment) { 9135 memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1); 9136 subpass->pDepthStencilAttachment = attachments; 9137 attachments += 1; 9138 } 9139 9140 memcpy(attachments, subpass->pPreserveAttachments, sizeof(attachments[0]) * subpass->preserveAttachmentCount); 9141 subpass->pPreserveAttachments = &attachments->attachment; 9142 } 9143 } 9144 if (pCreateInfo->pDependencies) { 9145 localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount]; 9146 memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies, 9147 localRPCI->dependencyCount * sizeof(VkSubpassDependency)); 9148 } 9149 dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI); 9150 dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency; 9151 dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node; 9152#if MTMERGESOURCE 9153 // MTMTODO : Merge with code from above to eliminate duplication 9154 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { 9155 VkAttachmentDescription desc = pCreateInfo->pAttachments[i]; 9156 MT_PASS_ATTACHMENT_INFO pass_info; 9157 pass_info.load_op = desc.loadOp; 9158 pass_info.store_op = desc.storeOp; 9159 pass_info.attachment = i; 9160 dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info); 9161 } 9162 // TODO: Maybe fill list and then copy instead of locking 9163 std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read; 9164 std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = 9165 dev_data->renderPassMap[*pRenderPass]->attachment_first_layout; 9166 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { 9167 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i]; 9168 if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) { 9169 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9170 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 9171 "Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i); 9172 } 9173 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { 9174 uint32_t attachment = subpass.pPreserveAttachments[j]; 9175 if (attachment >= pCreateInfo->attachmentCount) { 9176 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9177 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 9178 "Preserve attachment %d cannot be greater than the total number of attachments %d.", 9179 attachment, pCreateInfo->attachmentCount); 9180 } 9181 } 9182 for (uint32_t j = 0; j < 
subpass.colorAttachmentCount; ++j) { 9183 uint32_t attachment; 9184 if (subpass.pResolveAttachments) { 9185 attachment = subpass.pResolveAttachments[j].attachment; 9186 if (attachment >= pCreateInfo->attachmentCount && attachment != VK_ATTACHMENT_UNUSED) { 9187 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9188 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 9189 "Color attachment %d cannot be greater than the total number of attachments %d.", 9190 attachment, pCreateInfo->attachmentCount); 9191 continue; 9192 } 9193 } 9194 attachment = subpass.pColorAttachments[j].attachment; 9195 if (attachment >= pCreateInfo->attachmentCount) { 9196 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9197 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 9198 "Color attachment %d cannot be greater than the total number of attachments %d.", 9199 attachment, pCreateInfo->attachmentCount); 9200 continue; 9201 } 9202 if (attachment_first_read.count(attachment)) 9203 continue; 9204 attachment_first_read.insert(std::make_pair(attachment, false)); 9205 attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout)); 9206 } 9207 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { 9208 uint32_t attachment = subpass.pDepthStencilAttachment->attachment; 9209 if (attachment >= pCreateInfo->attachmentCount) { 9210 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9211 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 9212 "Depth stencil attachment %d cannot be greater than the total number of attachments %d.", 9213 attachment, pCreateInfo->attachmentCount); 9214 continue; 9215 } 9216 if (attachment_first_read.count(attachment)) 9217 continue; 9218 attachment_first_read.insert(std::make_pair(attachment, false)); 9219 attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout)); 9220 } 9221 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 9222 uint32_t attachment = subpass.pInputAttachments[j].attachment; 9223 if (attachment >= pCreateInfo->attachmentCount) { 9224 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, 9225 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", 9226 "Input attachment %d cannot be greater than the total number of attachments %d.", 9227 attachment, pCreateInfo->attachmentCount); 9228 continue; 9229 } 9230 if (attachment_first_read.count(attachment)) 9231 continue; 9232 attachment_first_read.insert(std::make_pair(attachment, true)); 9233 attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout)); 9234 } 9235 } 9236#endif 9237 lock.unlock(); 9238 } 9239 return result; 9240} 9241// Free the renderpass shadow 9242static void deleteRenderPasses(layer_data *my_data) { 9243 if (my_data->renderPassMap.size() <= 0) 9244 return; 9245 for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) { 9246 const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo; 9247 delete[] pRenderPassInfo->pAttachments; 9248 if (pRenderPassInfo->pSubpasses) { 9249 for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) { 9250 // Attachements are all allocated in a block, so just need to 9251 // find the first non-null one to delete 9252 if 
(pRenderPassInfo->pSubpasses[i].pInputAttachments) { 9253 delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments; 9254 } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) { 9255 delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments; 9256 } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) { 9257 delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments; 9258 } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) { 9259 delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments; 9260 } 9261 } 9262 delete[] pRenderPassInfo->pSubpasses; 9263 } 9264 delete[] pRenderPassInfo->pDependencies; 9265 delete pRenderPassInfo; 9266 delete (*ii).second; 9267 } 9268 my_data->renderPassMap.clear(); 9269} 9270 9271static bool VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) { 9272 bool skip_call = false; 9273 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 9274 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 9275 const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo; 9276 const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo; 9277 if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) { 9278 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9279 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer " 9280 "with a different number of attachments."); 9281 } 9282 for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { 9283 const VkImageView &image_view = framebufferInfo.pAttachments[i]; 9284 auto image_data = dev_data->imageViewMap.find(image_view); 9285 assert(image_data != dev_data->imageViewMap.end()); 9286 const VkImage &image = image_data->second.image; 9287 const VkImageSubresourceRange &subRange = image_data->second.subresourceRange; 9288 IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout, 9289 pRenderPassInfo->pAttachments[i].initialLayout}; 9290 // TODO: Do not iterate over every possibility - consolidate where possible 9291 for (uint32_t j = 0; j < subRange.levelCount; j++) { 9292 uint32_t level = subRange.baseMipLevel + j; 9293 for (uint32_t k = 0; k < subRange.layerCount; k++) { 9294 uint32_t layer = subRange.baseArrayLayer + k; 9295 VkImageSubresource sub = {subRange.aspectMask, level, layer}; 9296 IMAGE_CMD_BUF_LAYOUT_NODE node; 9297 if (!FindLayout(pCB, image, sub, node)) { 9298 SetLayout(pCB, image, sub, newNode); 9299 continue; 9300 } 9301 if (newNode.layout != node.layout) { 9302 skip_call |= 9303 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9304 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i " 9305 "where the " 9306 "initial layout is %s and the layout of the attachment at the " 9307 "start of the render pass is %s. 
The layouts must match.", 9308 i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout)); 9309 } 9310 } 9311 } 9312 } 9313 return skip_call; 9314} 9315 9316static void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, 9317 const int subpass_index) { 9318 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 9319 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 9320 auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass); 9321 if (render_pass_data == dev_data->renderPassMap.end()) { 9322 return; 9323 } 9324 const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo; 9325 auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer); 9326 if (framebuffer_data == dev_data->frameBufferMap.end()) { 9327 return; 9328 } 9329 const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo; 9330 const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index]; 9331 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { 9332 const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment]; 9333 SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout); 9334 } 9335 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { 9336 const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment]; 9337 SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout); 9338 } 9339 if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) { 9340 const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment]; 9341 SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout); 9342 } 9343} 9344 9345static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) { 9346 bool skip_call = false; 9347 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) { 9348 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9349 DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.", 9350 cmd_name.c_str()); 9351 } 9352 return skip_call; 9353} 9354 9355static void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) { 9356 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map); 9357 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer); 9358 auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass); 9359 if (render_pass_data == dev_data->renderPassMap.end()) { 9360 return; 9361 } 9362 const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo; 9363 auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer); 9364 if (framebuffer_data == dev_data->frameBufferMap.end()) { 9365 return; 9366 } 9367 const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo; 9368 for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) { 9369 const VkImageView &image_view = framebufferInfo.pAttachments[i]; 9370 SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout); 9371 } 9372} 9373 9374static bool 
VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) { 9375 bool skip_call = false; 9376 const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo; 9377 if (pRenderPassBegin->renderArea.offset.x < 0 || 9378 (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width || 9379 pRenderPassBegin->renderArea.offset.y < 0 || 9380 (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) { 9381 skip_call |= static_cast<bool>(log_msg( 9382 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9383 DRAWSTATE_INVALID_RENDER_AREA, "CORE", 9384 "Cannot execute a render pass with renderArea not within the bound of the " 9385 "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, " 9386 "height %d.", 9387 pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width, 9388 pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height)); 9389 } 9390 return skip_call; 9391} 9392 9393VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 9394vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { 9395 bool skipCall = false; 9396 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9397 std::unique_lock<std::mutex> lock(global_lock); 9398 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9399 if (pCB) { 9400 if (pRenderPassBegin && pRenderPassBegin->renderPass) { 9401#if MTMERGE 9402 auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass); 9403 if (pass_data != dev_data->renderPassMap.end()) { 9404 RENDER_PASS_NODE* pRPNode = pass_data->second; 9405 pRPNode->fb = pRenderPassBegin->framebuffer; 9406 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 9407 for (size_t i = 0; i < pRPNode->attachments.size(); ++i) { 9408 MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i]; 9409 if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { 9410 if (cb_data != dev_data->commandBufferMap.end()) { 9411 std::function<bool()> function = [=]() { 9412 set_memory_valid(dev_data, fb_info.mem, true, fb_info.image); 9413 return false; 9414 }; 9415 cb_data->second->validate_functions.push_back(function); 9416 } 9417 VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment]; 9418 if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL || 9419 attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { 9420 skipCall |= 9421 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 9422 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__, 9423 MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.", 9424 pRPNode->attachments[i].attachment, attachment_layout); 9425 } 9426 } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) { 9427 if (cb_data != dev_data->commandBufferMap.end()) { 9428 std::function<bool()> function = [=]() { 9429 set_memory_valid(dev_data, fb_info.mem, false, fb_info.image); 9430 return false; 9431 }; 9432 cb_data->second->validate_functions.push_back(function); 9433 } 9434 } else if 
(pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) { 9435 if (cb_data != dev_data->commandBufferMap.end()) { 9436 std::function<bool()> function = [=]() { 9437 return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image); 9438 }; 9439 cb_data->second->validate_functions.push_back(function); 9440 } 9441 } 9442 if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) { 9443 if (cb_data != dev_data->commandBufferMap.end()) { 9444 std::function<bool()> function = [=]() { 9445 return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image); 9446 }; 9447 cb_data->second->validate_functions.push_back(function); 9448 } 9449 } 9450 } 9451 } 9452#endif 9453 skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin); 9454 skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin); 9455 auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass); 9456 if (render_pass_data != dev_data->renderPassMap.end()) { 9457 skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode); 9458 } 9459 skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass"); 9460 skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass"); 9461 skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()"); 9462 pCB->activeRenderPass = pRenderPassBegin->renderPass; 9463 // This is a shallow copy as that is all that is needed for now 9464 pCB->activeRenderPassBeginInfo = *pRenderPassBegin; 9465 pCB->activeSubpass = 0; 9466 pCB->activeSubpassContents = contents; 9467 pCB->framebuffers.insert(pRenderPassBegin->framebuffer); 9468 // Connect this framebuffer to this cmdBuffer 9469 dev_data->frameBufferMap[pRenderPassBegin->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer); 9470 } else { 9471 skipCall |= 9472 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9473 DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()"); 9474 } 9475 } 9476 lock.unlock(); 9477 if (!skipCall) { 9478 dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); 9479 } 9480} 9481 9482VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { 9483 bool skipCall = false; 9484 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9485 std::unique_lock<std::mutex> lock(global_lock); 9486 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9487 if (pCB) { 9488 skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass"); 9489 skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()"); 9490 pCB->activeSubpass++; 9491 pCB->activeSubpassContents = contents; 9492 TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass); 9493 if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) { 9494 skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS, 9495 pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline); 9496 } 9497 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass"); 9498 } 9499 lock.unlock(); 9500 if (!skipCall) 9501 dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents); 9502} 9503 9504VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer 
commandBuffer) { 9505 bool skipCall = false; 9506 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); 9507 std::unique_lock<std::mutex> lock(global_lock); 9508#if MTMERGESOURCE 9509 auto cb_data = dev_data->commandBufferMap.find(commandBuffer); 9510 if (cb_data != dev_data->commandBufferMap.end()) { 9511 auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass); 9512 if (pass_data != dev_data->renderPassMap.end()) { 9513 RENDER_PASS_NODE* pRPNode = pass_data->second; 9514 for (size_t i = 0; i < pRPNode->attachments.size(); ++i) { 9515 MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i]; 9516 if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) { 9517 if (cb_data != dev_data->commandBufferMap.end()) { 9518 std::function<bool()> function = [=]() { 9519 set_memory_valid(dev_data, fb_info.mem, true, fb_info.image); 9520 return false; 9521 }; 9522 cb_data->second->validate_functions.push_back(function); 9523 } 9524 } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) { 9525 if (cb_data != dev_data->commandBufferMap.end()) { 9526 std::function<bool()> function = [=]() { 9527 set_memory_valid(dev_data, fb_info.mem, false, fb_info.image); 9528 return false; 9529 }; 9530 cb_data->second->validate_functions.push_back(function); 9531 } 9532 } 9533 } 9534 } 9535 } 9536#endif 9537 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); 9538 if (pCB) { 9539 skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass"); 9540 skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass"); 9541 skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()"); 9542 TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo); 9543 pCB->activeRenderPass = 0; 9544 pCB->activeSubpass = 0; 9545 } 9546 lock.unlock(); 9547 if (!skipCall) 9548 dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer); 9549} 9550 9551static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, 9552 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, 9553 const char *msg) { 9554 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 9555 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", 9556 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64 9557 " that is not compatible with the current render pass %" PRIx64 ". " 9558 "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". 
%s", 9559 (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach, 9560 msg); 9561} 9562 9563static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass, 9564 uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, 9565 uint32_t secondaryAttach, bool is_multi) { 9566 bool skip_call = false; 9567 auto primary_data = dev_data->renderPassMap.find(primaryPass); 9568 auto secondary_data = dev_data->renderPassMap.find(secondaryPass); 9569 if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) { 9570 primaryAttach = VK_ATTACHMENT_UNUSED; 9571 } 9572 if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) { 9573 secondaryAttach = VK_ATTACHMENT_UNUSED; 9574 } 9575 if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) { 9576 return skip_call; 9577 } 9578 if (primaryAttach == VK_ATTACHMENT_UNUSED) { 9579 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9580 secondaryAttach, "The first is unused while the second is not."); 9581 return skip_call; 9582 } 9583 if (secondaryAttach == VK_ATTACHMENT_UNUSED) { 9584 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9585 secondaryAttach, "The second is unused while the first is not."); 9586 return skip_call; 9587 } 9588 if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format != 9589 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) { 9590 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9591 secondaryAttach, "They have different formats."); 9592 } 9593 if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples != 9594 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) { 9595 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9596 secondaryAttach, "They have different samples."); 9597 } 9598 if (is_multi && 9599 primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags != 9600 secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) { 9601 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, 9602 secondaryAttach, "They have different flags."); 9603 } 9604 return skip_call; 9605} 9606 9607static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass, 9608 VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, 9609 bool is_multi) { 9610 bool skip_call = false; 9611 auto primary_data = dev_data->renderPassMap.find(primaryPass); 9612 auto secondary_data = dev_data->renderPassMap.find(secondaryPass); 9613 const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass]; 9614 const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass]; 9615 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount); 9616 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) { 9617 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED; 9618 if (i < primary_desc.inputAttachmentCount) { 9619 primary_input_attach = 
static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                         VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass,
                                         bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
                                                     secondaryPass, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
                                                     secondaryPass, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
                                                     secondaryPass, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach,
                                                 secondaryBuffer, secondaryPass, secondary_depthstencil_attach, is_multi);
    return skip_call;
}
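// Verify that two render passes are compatible for the purposes of vkCmdExecuteCommands:
// identical handles are trivially compatible; otherwise both create infos must be known,
// have the same subpass count, and every subpass must pass the per-subpass attachment
// checks above. Per-attachment flags are only compared when there is more than one
// subpass (is_multi).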
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
    bool skip_call = false;
    // Early exit if renderPass objects are identical (and therefore compatible)
    if (primaryPass == secondaryPass)
        return skip_call;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)primaryBuffer, (uint64_t)(primaryPass));
        return skip_call;
    }
    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                             " that is not compatible with the current render pass %" PRIx64 ". "
                             "They have a different number of subpasses.",
                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
        return skip_call;
    }
    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
        skip_call |=
            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
    }
    return skip_call;
}

static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = dev_data->renderPassMap[pCB->activeRenderPass]->fb;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
        }
        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
        if (fb_data == dev_data->frameBufferMap.end()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has invalid framebuffer %" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
    }
    return skip_call;
}
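// Check query state that a secondary command buffer inherits from its primary. If the
// primary has an active pipeline-statistics query, every statistic declared in the
// secondary's VkCommandBufferInheritanceInfo::pipelineStatistics must also be enabled
// on the active query pool. Also flag the case where the secondary starts a query of a
// type that is already active in the primary.
//
// Illustrative app-side setup only (not part of this layer; handles are hypothetical):
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondaryCmdBuf, &begin);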
static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skipCall = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics is being queried so the command "
                        "buffer must have all bits set on the queryPool.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 " of type %d but a query of that type has been started on "
                        "secondary Cmd Buffer %p.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
        }
    }
    return skipCall;
}
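// vkCmdExecuteCommands: validate each secondary command buffer against the recording
// primary before dispatching. The checks below cover: the buffer must be known and of
// secondary level; inside a render pass it must have been begun with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and a compatible render pass and
// framebuffer; inherited query constraints must hold; and simultaneous-use rules are
// enforced (or warned about) before the secondary is marked in-flight.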
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                    (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if has continue
                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
                }
                // If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
                // that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution starting w/
            // being recorded
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
                            "flight and inherited queries not "
                            "supported on this device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
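// Helper for vkMapMemory: if the allocation is bound to an image, every currently
// tracked layout of that image must be GENERAL or PREINITIALIZED for the map to be
// considered valid.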
static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto mem_data = dev_data->memObjMap.find(mem);
    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
        std::vector<VkImageLayout> layouts;
        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
            for (auto layout : layouts) {
                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                         "Cannot map an image with layout %s. Only "
                                         "GENERAL or PREINITIALIZED are supported.",
                                         string_VkImageLayout(layout));
                }
            }
        }
    }
    return skip_call;
}
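// vkMapMemory: under MTMERGESOURCE this marks the allocation valid, requires the
// memory type to be HOST_VISIBLE, validates the requested range, and records it so
// that later flush/invalidate calls can be checked against what was actually mapped.
//
// A minimal sketch of the app-side sequence these checks are aimed at (illustrative
// only; 'mem', 'size', and 'src' are hypothetical):
//     void *p = nullptr;
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &p);
//     memcpy(p, src, (size_t)size);
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = mem;
//     range.offset = 0;
//     range.size = VK_WHOLE_SIZE;
//     vkFlushMappedMemoryRanges(device, 1, &range);
//     vkUnmapMemory(device, mem);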
Only " 9891 "GENERAL or PREINITIALIZED are supported.", 9892 string_VkImageLayout(layout)); 9893 } 9894 } 9895 } 9896 } 9897 return skip_call; 9898} 9899 9900VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL 9901vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) { 9902 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9903 9904 bool skip_call = false; 9905 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 9906 std::unique_lock<std::mutex> lock(global_lock); 9907#if MTMERGESOURCE 9908 DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem); 9909 if (pMemObj) { 9910 pMemObj->valid = true; 9911 if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & 9912 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { 9913 skip_call = 9914 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9915 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM", 9916 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem); 9917 } 9918 } 9919 skip_call |= validateMemRange(dev_data, mem, offset, size); 9920 storeMemRanges(dev_data, mem, offset, size); 9921#endif 9922 skip_call |= ValidateMapImageLayouts(device, mem); 9923 lock.unlock(); 9924 9925 if (!skip_call) { 9926 result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData); 9927#if MTMERGESOURCE 9928 lock.lock(); 9929 initializeAndTrackMemory(dev_data, mem, size, ppData); 9930 lock.unlock(); 9931#endif 9932 } 9933 return result; 9934} 9935 9936#if MTMERGESOURCE 9937VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) { 9938 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 9939 bool skipCall = false; 9940 9941 std::unique_lock<std::mutex> lock(global_lock); 9942 skipCall |= deleteMemRanges(my_data, mem); 9943 lock.unlock(); 9944 if (!skipCall) { 9945 my_data->device_dispatch_table->UnmapMemory(device, mem); 9946 } 9947} 9948 9949static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount, 9950 const VkMappedMemoryRange *pMemRanges) { 9951 bool skipCall = false; 9952 for (uint32_t i = 0; i < memRangeCount; ++i) { 9953 auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory); 9954 if (mem_element != my_data->memObjMap.end()) { 9955 if (mem_element->second.memRange.offset > pMemRanges[i].offset) { 9956 skipCall |= log_msg( 9957 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 9958 (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM", 9959 "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset " 9960 "(" PRINTF_SIZE_T_SPECIFIER ").", 9961 funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset)); 9962 } 9963 if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) && 9964 ((mem_element->second.memRange.offset + mem_element->second.memRange.size) < 9965 (pMemRanges[i].offset + pMemRanges[i].size))) { 9966 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 9967 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__, 9968 MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER 9969 ") exceeds the Memory Object's upper-bound " 9970 "(" 
PRINTF_SIZE_T_SPECIFIER ").", 9971 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), 9972 static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size)); 9973 } 9974 } 9975 } 9976 return skipCall; 9977} 9978 9979static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount, 9980 const VkMappedMemoryRange *pMemRanges) { 9981 bool skipCall = false; 9982 for (uint32_t i = 0; i < memRangeCount; ++i) { 9983 auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory); 9984 if (mem_element != my_data->memObjMap.end()) { 9985 if (mem_element->second.pData) { 9986 VkDeviceSize size = mem_element->second.memRange.size; 9987 VkDeviceSize half_size = (size / 2); 9988 char *data = static_cast<char *>(mem_element->second.pData); 9989 for (auto j = 0; j < half_size; ++j) { 9990 if (data[j] != NoncoherentMemoryFillValue) { 9991 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 9992 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__, 9993 MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64, 9994 (uint64_t)pMemRanges[i].memory); 9995 } 9996 } 9997 for (auto j = size + half_size; j < 2 * size; ++j) { 9998 if (data[j] != NoncoherentMemoryFillValue) { 9999 skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, 10000 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__, 10001 MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64, 10002 (uint64_t)pMemRanges[i].memory); 10003 } 10004 } 10005 memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size)); 10006 } 10007 } 10008 } 10009 return skipCall; 10010} 10011 10012VK_LAYER_EXPORT VkResult VKAPI_CALL 10013vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { 10014 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 10015 bool skipCall = false; 10016 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10017 10018 std::unique_lock<std::mutex> lock(global_lock); 10019 skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges); 10020 skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); 10021 lock.unlock(); 10022 if (!skipCall) { 10023 result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges); 10024 } 10025 return result; 10026} 10027 10028VK_LAYER_EXPORT VkResult VKAPI_CALL 10029vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) { 10030 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; 10031 bool skipCall = false; 10032 layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10033 10034 std::unique_lock<std::mutex> lock(global_lock); 10035 skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); 10036 lock.unlock(); 10037 if (!skipCall) { 10038 result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges); 10039 } 10040 return result; 10041} 10042#endif 10043 10044VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) { 10045 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 10046 
VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        // Track objects tied to memory
        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
        VkMemoryRequirements memRequirements;
        lock.unlock();
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        lock.lock();
        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        print_mem_list(dev_data);
        lock.unlock();
        if (!skipCall) {
            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
            lock.lock();
            dev_data->memObjMap[mem].image = image;
            image_node->second.mem = mem;
            image_node->second.memOffset = memoryOffset;
            image_node->second.memSize = memRequirements.size;
            lock.unlock();
        }
    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image %" PRIx64 ", has it already been deleted?",
                reinterpret_cast<const uint64_t &>(image));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_node = dev_data->eventMap.find(event);
    if (event_node != dev_data->eventMap.end()) {
        event_node->second.needsSignaled = false;
        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_node->second.in_use.load()) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                 "Cannot call vkSetEvent() on event %" PRIxLEAST64 " that is already in use by a command buffer.",
                                 reinterpret_cast<const uint64_t &>(event));
        }
    }
    lock.unlock();
    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
    //        ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
    for (auto queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
    if (!skip_call)
        result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}
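// vkQueueBindSparse: validate the fence (not already in use, not already signaled),
// record every sparse buffer/image binding so the memory tracker can see it, and
// apply the same semaphore wait/signal bookkeeping used for queue submits.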
VKAPI_ATTR VkResult VKAPI_CALL
vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    // First verify that fence is not in use
    if (fence != VK_NULL_HANDLE) {
        dev_data->fenceMap[fence].queue = queue;
        if ((bindInfoCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                        "Fence %#" PRIx64 " is already in use by another submission.", reinterpret_cast<uint64_t &>(fence));
        }
        if (!dev_data->fenceMap[fence].needsSignaled) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                        "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted",
                        reinterpret_cast<uint64_t &>(fence));
        }
    }
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
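        // Semaphore bookkeeping below mirrors vkQueueSubmit: each wait consumes a
        // pending signal (otherwise the queue would have no way to make forward
        // progress), and each signal must not target a semaphore that is still
        // signaled from an earlier submission.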
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64
                                " that has no way to be signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
                                ", but that semaphore is already signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
                dev_data->semaphoreMap[semaphore].signaled = true;
            }
        }
    }
    print_mem_list(dev_data);
    lock.unlock();

    if (!skip_call)
        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaled = false;
        sNode->queue = VK_NULL_HANDLE;
        sNode->in_use.store(0);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].in_use.store(0);
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    if (VK_SUCCESS == result) {
        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
    }

    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
        if (swapchain_data->second->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->second->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                skipCall = clear_object_binding(dev_data, (uint64_t)swapchain_image,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
                dev_data->imageMap.erase(swapchain_image);
            }
        }
        delete swapchain_data->second;
        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}
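// vkGetSwapchainImagesKHR: on the second (filling) call, register each returned
// swapchain image with the tracker: a stand-in IMAGE_NODE keyed off the swapchain's
// create info, an UNDEFINED initial layout, and the special
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY memory handle, since these images bypass
// vkCreateImage/vkBindImageMemory.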
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount)
            return result;
        std::lock_guard<std::mutex> lock(global_lock);
        const size_t count = *pCount;
        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
        if (!swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages,
                                          sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(%" PRIu64 ") returned mismatching data",
                        (uint64_t)(swapchain));
            }
        }
        for (uint32_t i = 0; i < *pCount; ++i) {
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_node->createInfo.imageFormat;
            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
            image_node.createInfo.mipLevels = 1;
            image_node.createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            image_node.createInfo.usage = swapchain_node->createInfo.imageUsage;
            image_node.valid = false;
            image_node.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
            swapchain_node->images.push_back(pSwapchainImages[i]);
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            dev_data->imageLayoutMap[subpair] = image_layout_node;
            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
        }
    }
    return result;
}
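// vkQueuePresentKHR: each wait semaphore must have a pending signal, each presented
// image must be backed by valid memory, and every tracked layout of that image must
// be VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. The usual way an app satisfies the layout rule
// is a barrier before ending the frame's command buffer (sketch only; handles are
// hypothetical):
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = swapchainImage;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);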
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        VkDeviceMemory mem;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t &>(queue), __LINE__,
                                        DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s.",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);

    return result;
}
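// vkAcquireNextImageKHR: the semaphore to be signaled must not already be signaled
// (there is no way to wait on it in between), and a fence, if provided, is associated
// with the swapchain so a later wait can be matched to the acquire.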
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    if (semaphore != VK_NULL_HANDLE &&
        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].signaled) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].signaled = true;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    lock.unlock();

    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }

    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                                                           VkDebugReportCallbackEXT msgCallback,
                                                                           const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}
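// vkGetDeviceProcAddr: route known entry points to the layer's intercepts; WSI entry
// points are only returned when the swapchain extension was enabled on the device, and
// anything unrecognized falls through to the next layer's dispatch table.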
"vkDestroyEvent")) 10465 return (PFN_vkVoidFunction)vkDestroyEvent; 10466 if (!strcmp(funcName, "vkDestroyQueryPool")) 10467 return (PFN_vkVoidFunction)vkDestroyQueryPool; 10468 if (!strcmp(funcName, "vkDestroyBuffer")) 10469 return (PFN_vkVoidFunction)vkDestroyBuffer; 10470 if (!strcmp(funcName, "vkDestroyBufferView")) 10471 return (PFN_vkVoidFunction)vkDestroyBufferView; 10472 if (!strcmp(funcName, "vkDestroyImage")) 10473 return (PFN_vkVoidFunction)vkDestroyImage; 10474 if (!strcmp(funcName, "vkDestroyImageView")) 10475 return (PFN_vkVoidFunction)vkDestroyImageView; 10476 if (!strcmp(funcName, "vkDestroyShaderModule")) 10477 return (PFN_vkVoidFunction)vkDestroyShaderModule; 10478 if (!strcmp(funcName, "vkDestroyPipeline")) 10479 return (PFN_vkVoidFunction)vkDestroyPipeline; 10480 if (!strcmp(funcName, "vkDestroyPipelineLayout")) 10481 return (PFN_vkVoidFunction)vkDestroyPipelineLayout; 10482 if (!strcmp(funcName, "vkDestroySampler")) 10483 return (PFN_vkVoidFunction)vkDestroySampler; 10484 if (!strcmp(funcName, "vkDestroyDescriptorSetLayout")) 10485 return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout; 10486 if (!strcmp(funcName, "vkDestroyDescriptorPool")) 10487 return (PFN_vkVoidFunction)vkDestroyDescriptorPool; 10488 if (!strcmp(funcName, "vkDestroyFramebuffer")) 10489 return (PFN_vkVoidFunction)vkDestroyFramebuffer; 10490 if (!strcmp(funcName, "vkDestroyRenderPass")) 10491 return (PFN_vkVoidFunction)vkDestroyRenderPass; 10492 if (!strcmp(funcName, "vkCreateBuffer")) 10493 return (PFN_vkVoidFunction)vkCreateBuffer; 10494 if (!strcmp(funcName, "vkCreateBufferView")) 10495 return (PFN_vkVoidFunction)vkCreateBufferView; 10496 if (!strcmp(funcName, "vkCreateImage")) 10497 return (PFN_vkVoidFunction)vkCreateImage; 10498 if (!strcmp(funcName, "vkCreateImageView")) 10499 return (PFN_vkVoidFunction)vkCreateImageView; 10500 if (!strcmp(funcName, "vkCreateFence")) 10501 return (PFN_vkVoidFunction)vkCreateFence; 10502 if (!strcmp(funcName, "CreatePipelineCache")) 10503 return (PFN_vkVoidFunction)vkCreatePipelineCache; 10504 if (!strcmp(funcName, "DestroyPipelineCache")) 10505 return (PFN_vkVoidFunction)vkDestroyPipelineCache; 10506 if (!strcmp(funcName, "GetPipelineCacheData")) 10507 return (PFN_vkVoidFunction)vkGetPipelineCacheData; 10508 if (!strcmp(funcName, "MergePipelineCaches")) 10509 return (PFN_vkVoidFunction)vkMergePipelineCaches; 10510 if (!strcmp(funcName, "vkCreateGraphicsPipelines")) 10511 return (PFN_vkVoidFunction)vkCreateGraphicsPipelines; 10512 if (!strcmp(funcName, "vkCreateComputePipelines")) 10513 return (PFN_vkVoidFunction)vkCreateComputePipelines; 10514 if (!strcmp(funcName, "vkCreateSampler")) 10515 return (PFN_vkVoidFunction)vkCreateSampler; 10516 if (!strcmp(funcName, "vkCreateDescriptorSetLayout")) 10517 return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout; 10518 if (!strcmp(funcName, "vkCreatePipelineLayout")) 10519 return (PFN_vkVoidFunction)vkCreatePipelineLayout; 10520 if (!strcmp(funcName, "vkCreateDescriptorPool")) 10521 return (PFN_vkVoidFunction)vkCreateDescriptorPool; 10522 if (!strcmp(funcName, "vkResetDescriptorPool")) 10523 return (PFN_vkVoidFunction)vkResetDescriptorPool; 10524 if (!strcmp(funcName, "vkAllocateDescriptorSets")) 10525 return (PFN_vkVoidFunction)vkAllocateDescriptorSets; 10526 if (!strcmp(funcName, "vkFreeDescriptorSets")) 10527 return (PFN_vkVoidFunction)vkFreeDescriptorSets; 10528 if (!strcmp(funcName, "vkUpdateDescriptorSets")) 10529 return (PFN_vkVoidFunction)vkUpdateDescriptorSets; 10530 if (!strcmp(funcName, 
"vkCreateCommandPool")) 10531 return (PFN_vkVoidFunction)vkCreateCommandPool; 10532 if (!strcmp(funcName, "vkDestroyCommandPool")) 10533 return (PFN_vkVoidFunction)vkDestroyCommandPool; 10534 if (!strcmp(funcName, "vkResetCommandPool")) 10535 return (PFN_vkVoidFunction)vkResetCommandPool; 10536 if (!strcmp(funcName, "vkCreateQueryPool")) 10537 return (PFN_vkVoidFunction)vkCreateQueryPool; 10538 if (!strcmp(funcName, "vkAllocateCommandBuffers")) 10539 return (PFN_vkVoidFunction)vkAllocateCommandBuffers; 10540 if (!strcmp(funcName, "vkFreeCommandBuffers")) 10541 return (PFN_vkVoidFunction)vkFreeCommandBuffers; 10542 if (!strcmp(funcName, "vkBeginCommandBuffer")) 10543 return (PFN_vkVoidFunction)vkBeginCommandBuffer; 10544 if (!strcmp(funcName, "vkEndCommandBuffer")) 10545 return (PFN_vkVoidFunction)vkEndCommandBuffer; 10546 if (!strcmp(funcName, "vkResetCommandBuffer")) 10547 return (PFN_vkVoidFunction)vkResetCommandBuffer; 10548 if (!strcmp(funcName, "vkCmdBindPipeline")) 10549 return (PFN_vkVoidFunction)vkCmdBindPipeline; 10550 if (!strcmp(funcName, "vkCmdSetViewport")) 10551 return (PFN_vkVoidFunction)vkCmdSetViewport; 10552 if (!strcmp(funcName, "vkCmdSetScissor")) 10553 return (PFN_vkVoidFunction)vkCmdSetScissor; 10554 if (!strcmp(funcName, "vkCmdSetLineWidth")) 10555 return (PFN_vkVoidFunction)vkCmdSetLineWidth; 10556 if (!strcmp(funcName, "vkCmdSetDepthBias")) 10557 return (PFN_vkVoidFunction)vkCmdSetDepthBias; 10558 if (!strcmp(funcName, "vkCmdSetBlendConstants")) 10559 return (PFN_vkVoidFunction)vkCmdSetBlendConstants; 10560 if (!strcmp(funcName, "vkCmdSetDepthBounds")) 10561 return (PFN_vkVoidFunction)vkCmdSetDepthBounds; 10562 if (!strcmp(funcName, "vkCmdSetStencilCompareMask")) 10563 return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask; 10564 if (!strcmp(funcName, "vkCmdSetStencilWriteMask")) 10565 return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask; 10566 if (!strcmp(funcName, "vkCmdSetStencilReference")) 10567 return (PFN_vkVoidFunction)vkCmdSetStencilReference; 10568 if (!strcmp(funcName, "vkCmdBindDescriptorSets")) 10569 return (PFN_vkVoidFunction)vkCmdBindDescriptorSets; 10570 if (!strcmp(funcName, "vkCmdBindVertexBuffers")) 10571 return (PFN_vkVoidFunction)vkCmdBindVertexBuffers; 10572 if (!strcmp(funcName, "vkCmdBindIndexBuffer")) 10573 return (PFN_vkVoidFunction)vkCmdBindIndexBuffer; 10574 if (!strcmp(funcName, "vkCmdDraw")) 10575 return (PFN_vkVoidFunction)vkCmdDraw; 10576 if (!strcmp(funcName, "vkCmdDrawIndexed")) 10577 return (PFN_vkVoidFunction)vkCmdDrawIndexed; 10578 if (!strcmp(funcName, "vkCmdDrawIndirect")) 10579 return (PFN_vkVoidFunction)vkCmdDrawIndirect; 10580 if (!strcmp(funcName, "vkCmdDrawIndexedIndirect")) 10581 return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect; 10582 if (!strcmp(funcName, "vkCmdDispatch")) 10583 return (PFN_vkVoidFunction)vkCmdDispatch; 10584 if (!strcmp(funcName, "vkCmdDispatchIndirect")) 10585 return (PFN_vkVoidFunction)vkCmdDispatchIndirect; 10586 if (!strcmp(funcName, "vkCmdCopyBuffer")) 10587 return (PFN_vkVoidFunction)vkCmdCopyBuffer; 10588 if (!strcmp(funcName, "vkCmdCopyImage")) 10589 return (PFN_vkVoidFunction)vkCmdCopyImage; 10590 if (!strcmp(funcName, "vkCmdBlitImage")) 10591 return (PFN_vkVoidFunction)vkCmdBlitImage; 10592 if (!strcmp(funcName, "vkCmdCopyBufferToImage")) 10593 return (PFN_vkVoidFunction)vkCmdCopyBufferToImage; 10594 if (!strcmp(funcName, "vkCmdCopyImageToBuffer")) 10595 return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer; 10596 if (!strcmp(funcName, "vkCmdUpdateBuffer")) 10597 return 
(PFN_vkVoidFunction)vkCmdUpdateBuffer; 10598 if (!strcmp(funcName, "vkCmdFillBuffer")) 10599 return (PFN_vkVoidFunction)vkCmdFillBuffer; 10600 if (!strcmp(funcName, "vkCmdClearColorImage")) 10601 return (PFN_vkVoidFunction)vkCmdClearColorImage; 10602 if (!strcmp(funcName, "vkCmdClearDepthStencilImage")) 10603 return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage; 10604 if (!strcmp(funcName, "vkCmdClearAttachments")) 10605 return (PFN_vkVoidFunction)vkCmdClearAttachments; 10606 if (!strcmp(funcName, "vkCmdResolveImage")) 10607 return (PFN_vkVoidFunction)vkCmdResolveImage; 10608 if (!strcmp(funcName, "vkCmdSetEvent")) 10609 return (PFN_vkVoidFunction)vkCmdSetEvent; 10610 if (!strcmp(funcName, "vkCmdResetEvent")) 10611 return (PFN_vkVoidFunction)vkCmdResetEvent; 10612 if (!strcmp(funcName, "vkCmdWaitEvents")) 10613 return (PFN_vkVoidFunction)vkCmdWaitEvents; 10614 if (!strcmp(funcName, "vkCmdPipelineBarrier")) 10615 return (PFN_vkVoidFunction)vkCmdPipelineBarrier; 10616 if (!strcmp(funcName, "vkCmdBeginQuery")) 10617 return (PFN_vkVoidFunction)vkCmdBeginQuery; 10618 if (!strcmp(funcName, "vkCmdEndQuery")) 10619 return (PFN_vkVoidFunction)vkCmdEndQuery; 10620 if (!strcmp(funcName, "vkCmdResetQueryPool")) 10621 return (PFN_vkVoidFunction)vkCmdResetQueryPool; 10622 if (!strcmp(funcName, "vkCmdCopyQueryPoolResults")) 10623 return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults; 10624 if (!strcmp(funcName, "vkCmdPushConstants")) 10625 return (PFN_vkVoidFunction)vkCmdPushConstants; 10626 if (!strcmp(funcName, "vkCmdWriteTimestamp")) 10627 return (PFN_vkVoidFunction)vkCmdWriteTimestamp; 10628 if (!strcmp(funcName, "vkCreateFramebuffer")) 10629 return (PFN_vkVoidFunction)vkCreateFramebuffer; 10630 if (!strcmp(funcName, "vkCreateShaderModule")) 10631 return (PFN_vkVoidFunction)vkCreateShaderModule; 10632 if (!strcmp(funcName, "vkCreateRenderPass")) 10633 return (PFN_vkVoidFunction)vkCreateRenderPass; 10634 if (!strcmp(funcName, "vkCmdBeginRenderPass")) 10635 return (PFN_vkVoidFunction)vkCmdBeginRenderPass; 10636 if (!strcmp(funcName, "vkCmdNextSubpass")) 10637 return (PFN_vkVoidFunction)vkCmdNextSubpass; 10638 if (!strcmp(funcName, "vkCmdEndRenderPass")) 10639 return (PFN_vkVoidFunction)vkCmdEndRenderPass; 10640 if (!strcmp(funcName, "vkCmdExecuteCommands")) 10641 return (PFN_vkVoidFunction)vkCmdExecuteCommands; 10642 if (!strcmp(funcName, "vkSetEvent")) 10643 return (PFN_vkVoidFunction)vkSetEvent; 10644 if (!strcmp(funcName, "vkMapMemory")) 10645 return (PFN_vkVoidFunction)vkMapMemory; 10646#if MTMERGESOURCE 10647 if (!strcmp(funcName, "vkUnmapMemory")) 10648 return (PFN_vkVoidFunction)vkUnmapMemory; 10649 if (!strcmp(funcName, "vkAllocateMemory")) 10650 return (PFN_vkVoidFunction)vkAllocateMemory; 10651 if (!strcmp(funcName, "vkFreeMemory")) 10652 return (PFN_vkVoidFunction)vkFreeMemory; 10653 if (!strcmp(funcName, "vkFlushMappedMemoryRanges")) 10654 return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges; 10655 if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges")) 10656 return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges; 10657 if (!strcmp(funcName, "vkBindBufferMemory")) 10658 return (PFN_vkVoidFunction)vkBindBufferMemory; 10659 if (!strcmp(funcName, "vkGetBufferMemoryRequirements")) 10660 return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements; 10661 if (!strcmp(funcName, "vkGetImageMemoryRequirements")) 10662 return (PFN_vkVoidFunction)vkGetImageMemoryRequirements; 10663#endif 10664 if (!strcmp(funcName, "vkGetQueryPoolResults")) 10665 return (PFN_vkVoidFunction)vkGetQueryPoolResults; 
10666 if (!strcmp(funcName, "vkBindImageMemory")) 10667 return (PFN_vkVoidFunction)vkBindImageMemory; 10668 if (!strcmp(funcName, "vkQueueBindSparse")) 10669 return (PFN_vkVoidFunction)vkQueueBindSparse; 10670 if (!strcmp(funcName, "vkCreateSemaphore")) 10671 return (PFN_vkVoidFunction)vkCreateSemaphore; 10672 if (!strcmp(funcName, "vkCreateEvent")) 10673 return (PFN_vkVoidFunction)vkCreateEvent; 10674 10675 if (dev == NULL) 10676 return NULL; 10677 10678 layer_data *dev_data; 10679 dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map); 10680 10681 if (dev_data->device_extensions.wsi_enabled) { 10682 if (!strcmp(funcName, "vkCreateSwapchainKHR")) 10683 return (PFN_vkVoidFunction)vkCreateSwapchainKHR; 10684 if (!strcmp(funcName, "vkDestroySwapchainKHR")) 10685 return (PFN_vkVoidFunction)vkDestroySwapchainKHR; 10686 if (!strcmp(funcName, "vkGetSwapchainImagesKHR")) 10687 return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR; 10688 if (!strcmp(funcName, "vkAcquireNextImageKHR")) 10689 return (PFN_vkVoidFunction)vkAcquireNextImageKHR; 10690 if (!strcmp(funcName, "vkQueuePresentKHR")) 10691 return (PFN_vkVoidFunction)vkQueuePresentKHR; 10692 } 10693 10694 VkLayerDispatchTable *pTable = dev_data->device_dispatch_table; 10695 { 10696 if (pTable->GetDeviceProcAddr == NULL) 10697 return NULL; 10698 return pTable->GetDeviceProcAddr(dev, funcName); 10699 } 10700} 10701 10702VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) { 10703 if (!strcmp(funcName, "vkGetInstanceProcAddr")) 10704 return (PFN_vkVoidFunction)vkGetInstanceProcAddr; 10705 if (!strcmp(funcName, "vkGetDeviceProcAddr")) 10706 return (PFN_vkVoidFunction)vkGetDeviceProcAddr; 10707 if (!strcmp(funcName, "vkCreateInstance")) 10708 return (PFN_vkVoidFunction)vkCreateInstance; 10709 if (!strcmp(funcName, "vkCreateDevice")) 10710 return (PFN_vkVoidFunction)vkCreateDevice; 10711 if (!strcmp(funcName, "vkDestroyInstance")) 10712 return (PFN_vkVoidFunction)vkDestroyInstance; 10713 if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties")) 10714 return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties; 10715 if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties")) 10716 return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties; 10717 if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties")) 10718 return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties; 10719 if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties")) 10720 return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties; 10721 10722 if (instance == NULL) 10723 return NULL; 10724 10725 PFN_vkVoidFunction fptr; 10726 10727 layer_data *my_data; 10728 my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); 10729 fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName); 10730 if (fptr) 10731 return fptr; 10732 10733 VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; 10734 if (pTable->GetInstanceProcAddr == NULL) 10735 return NULL; 10736 return pTable->GetInstanceProcAddr(instance, funcName); 10737} 10738