/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */

#include <cassert>
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <vector>

#include "vulkan/vk_layer.h"
#include "vk_layer_extension_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_table.h"
#include "vk_layer_utils.h"

namespace object_tracker {

// Object Tracker ERROR codes
enum OBJECT_TRACK_ERROR {
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,    // Command Pools specified incorrectly
};

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
enum ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                     // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,       // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,           // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,             // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,        // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,      // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,           // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
};

struct OBJTRACK_NODE {
    uint64_t vkObj;                     // Object handle
    VkDebugReportObjectTypeEXT objType; // Object type identifier
    ObjectStatusFlags status;           // Object state
    uint64_t parentObj;                 // Parent object
    uint64_t belongsTo;                 // Object Scope -- owning device/instance
};

// Prototypes for extension functions
uint64_t objTrackGetObjectCount(VkDevice device);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);

// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);
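// These counters are not part of the core API; a tool that knows this layer is
// loaded would fetch the entry points by name through the normal loader
// mechanism -- a minimal sketch (assuming a valid VkDevice `device`):
//
//     OBJ_TRACK_GET_OBJECT_COUNT pfnGetObjectCount =
//         (OBJ_TRACK_GET_OBJECT_COUNT)vkGetDeviceProcAddr(device, "objTrackGetObjectCount");
//     if (pfnGetObjectCount) {
//         uint64_t live_objects = pfnGetObjectCount(device);
//     }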
struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    // TODO: put instance data here
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    bool wsi_enabled;
    bool objtrack_extensions_enabled;
    // The following are for keeping track of the temporary callbacks that can
    // be used in vkCreateInstance and vkDestroyInstance:
    uint32_t num_tmp_callbacks;
    VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
    VkDebugReportCallbackEXT *tmp_callbacks;

    layer_data()
        : report_data(nullptr), wsi_enabled(false), objtrack_extensions_enabled(false), num_tmp_callbacks(0),
          tmp_dbg_create_infos(nullptr), tmp_callbacks(nullptr){};
};

struct instExts {
    bool wsi_enabled;
};

static std::unordered_map<void *, struct instExts> instanceExtMap;
static std::unordered_map<void *, layer_data *> layer_data_map;
static device_table_map object_tracker_device_table_map;
static instance_table_map object_tracker_instance_table_map;

// We additionally need to validate image usage with a separate map
// of swapchain-created images
static std::unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;

static unsigned long long object_track_index = 0;
static std::mutex global_lock;

#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t numTotalObjs = 0;
std::vector<VkQueueFamilyProperties> queue_family_properties;

//
// Internal Object Tracker Functions
//

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
    my_device_data->wsi_enabled = false;
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            my_device_data->wsi_enabled = true;

        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
            my_device_data->objtrack_extensions_enabled = true;
    }
}
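// wsi_enabled only becomes true when the application asked for VK_KHR_swapchain
// at device-creation time -- an application-side sketch (variable names assumed):
//
//     const char *extensions[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
//     VkDeviceCreateInfo device_ci = {};
//     device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     device_ci.enabledExtensionCount = 1;
//     device_ci.ppEnabledExtensionNames = extensions;
//     // ... queue create infos etc., then vkCreateDevice(gpu, &device_ci, ...)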
static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
    uint32_t i;
    VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
    PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;

    pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
    pDisp->GetPhysicalDeviceSurfaceSupportKHR =
        (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
    pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
        (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
    pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
        (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
    pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
        (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");

#ifdef VK_USE_PLATFORM_WIN32_KHR
    pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
    pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
    pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
    pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
    pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
    pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
    pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
    pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
    pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
#endif // VK_USE_PLATFORM_ANDROID_KHR

    instanceExtMap[pDisp].wsi_enabled = false;
    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].wsi_enabled = true;
    }
}

// Indicate device or instance dispatch table type
enum DispTableType {
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
};

// Look up the debug report data for any dispatchable object's layer_data
debug_report_data *mdd(const void *object) {
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

// Look up the debug report data for an instance's layer_data
debug_report_data *mid(VkInstance object) {
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

// For each Queue's doubly linked-list of mem refs
struct OT_MEM_INFO {
    VkDeviceMemory mem;
    OT_MEM_INFO *pNextMI;
    OT_MEM_INFO *pPrevMI;
};

// Track Queue information
struct OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList;
    uint32_t queueNodeIndex;
    VkQueue queue;
    uint32_t refCount;
};

// Global map of structures, one per queue
std::unordered_map<VkQueue, OT_QUEUE_INFO *> queue_info_map;

#include "vk_dispatch_table_helper.h"

static void init_object_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
}
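// layer_debug_actions() reads this layer's reporting configuration using the
// "lunarg_object_tracker" prefix. Typical vk_layer_settings.txt entries look
// roughly like the following (illustrative values, not a complete list):
//
//     lunarg_object_tracker.debug_action = VK_DBG_LAYER_ACTION_LOG_MSG
//     lunarg_object_tracker.report_flags = error,warn
//     lunarg_object_tracker.log_filename = stdout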
//
// Forward declarations
//

static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
static bool validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static bool validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
                              bool null_allowed);
static bool validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
                            bool null_allowed);
static bool validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
                                     bool null_allowed);
static bool validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
                                           VkDebugReportObjectTypeEXT objType, bool null_allowed);
static bool validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static bool validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
                            bool null_allowed);
static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
static bool validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
                                    bool null_allowed);
static bool validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
                                 bool null_allowed);
static bool validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
                                   bool null_allowed);
static bool validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
                                     bool null_allowed);
static bool validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
                              bool null_allowed);
static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
static bool set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                     ObjectStatusFlags status_flag);
static bool reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                       ObjectStatusFlags status_flag);
static void destroy_queue(VkQueue dispatchable_object, VkQueue object);

extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;
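// Each per-type map above is keyed by the raw 64-bit handle and owns one
// OBJTRACK_NODE per live object. The create_*/alloc_* helpers below insert a
// node and bump numObjs[type]/numTotalObjs; the destroy_*/free_* helpers
// reverse that, so nonzero counts at teardown indicate leaked objects.
// Dispatchable handles (VkInstance, VkPhysicalDevice, VkDevice, VkQueue,
// VkCommandBuffer) are pointers and are converted with reinterpret_cast;
// non-dispatchable handles are already 64-bit values, hence the plain
// (uint64_t) casts used for them.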
// Convert an object type enum to an object type array index
static uint32_t objTypeToIndex(uint32_t objType) {
    uint32_t index = objType;
    return index;
}

// Add new queue to head of global queue list
static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
    auto queueItem = queue_info_map.find(queue);
    if (queueItem == queue_info_map.end()) {
        OT_QUEUE_INFO *p_queue_info = new OT_QUEUE_INFO;
        if (p_queue_info != NULL) {
            memset(p_queue_info, 0, sizeof(OT_QUEUE_INFO));
            p_queue_info->queue = queue;
            p_queue_info->queueNodeIndex = queueNodeIndex;
            queue_info_map[queue] = p_queue_info;
        } else {
            log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                    reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                    "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
        }
    }
}

// Destroy memRef lists and free all memory
static void destroyQueueMemRefLists() {
    for (auto queue_item : queue_info_map) {
        OT_MEM_INFO *p_mem_info = queue_item.second->pMemRefList;
        while (p_mem_info != NULL) {
            OT_MEM_INFO *p_del_mem_info = p_mem_info;
            p_mem_info = p_mem_info->pNextMI;
            delete p_del_mem_info;
        }
        delete queue_item.second;
    }
    queue_info_map.clear();

    // Destroy the items in the queue map
    auto queue = VkQueueMap.begin();
    while (queue != VkQueueMap.end()) {
        uint32_t obj_index = objTypeToIndex(queue->second->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[obj_index] > 0);
        numObjs[obj_index]--;
        log_msg(mdd(reinterpret_cast<VkQueue>(queue->second->vkObj)), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, queue->second->objType,
                queue->second->vkObj, __LINE__, OBJTRACK_NONE, "OBJTRACK",
                "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                string_VkDebugReportObjectTypeEXT(queue->second->objType), queue->second->vkObj, numTotalObjs, numObjs[obj_index],
                string_VkDebugReportObjectTypeEXT(queue->second->objType));
        delete queue->second;
        queue = VkQueueMap.erase(queue);
    }
}
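// validateQueueFlags() below assumes queue_family_properties was filled in by
// explicit_GetPhysicalDeviceQueueFamilyProperties(). An application picks a
// sparse-capable family the usual way -- a sketch (variable names assumed):
//
//     for (uint32_t f = 0; f < family_count; ++f) {
//         if (props[f].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) {
//             sparse_family = f;
//             break;
//         }
//     }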
// Check Queue type flags for selected queue operations
static void validateQueueFlags(VkQueue queue, const char *function) {

    auto queue_item = queue_info_map.find(queue);
    if (queue_item != queue_info_map.end()) {
        OT_QUEUE_INFO *pQueueInfo = queue_item->second;
        if (pQueueInfo != NULL) {
            if ((queue_family_properties[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
                log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                        reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                        "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
            }
        }
    }
}

static void create_physical_device(VkInstance instance, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(instance), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    uint64_t physical_device_handle = reinterpret_cast<uint64_t>(vkObj);
    auto pd_item = VkPhysicalDeviceMap.find(physical_device_handle);
    if (pd_item == VkPhysicalDeviceMap.end()) {
        OBJTRACK_NODE *p_new_obj_node = new OBJTRACK_NODE;
        p_new_obj_node->objType = objType;
        p_new_obj_node->belongsTo = reinterpret_cast<uint64_t>(instance);
        p_new_obj_node->status = OBJSTATUS_NONE;
        p_new_obj_node->vkObj = physical_device_handle;
        VkPhysicalDeviceMap[physical_device_handle] = p_new_obj_node;
        uint32_t objIndex = objTypeToIndex(objType);
        numObjs[objIndex]++;
        numTotalObjs++;
    }
}

static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
    // TODO: Add tracking of surface objects
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
    uint64_t object_handle = (uint64_t)(object);
    if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
        OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
                OBJTRACK_NONE, "OBJTRACK",
                "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
                string_VkDebugReportObjectTypeEXT(pNode->objType));
        delete pNode;
        VkSurfaceKHRMap.erase(object_handle);
    } else {
        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
                OBJTRACK_NONE, "OBJTRACK",
                "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
    }
}
static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
                                 VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)device;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    pNewObjNode->parentObj = (uint64_t)commandPool;
    if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
        pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
    } else {
        pNewObjNode->status = OBJSTATUS_NONE;
    }
    VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static bool validate_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
    bool skipCall = false;
    uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
    if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
        OBJTRACK_NODE *pNode = VkCommandBufferMap[(uint64_t)commandBuffer];

        if (pNode->parentObj != (uint64_t)(commandPool)) {
            skipCall |= log_msg(
                mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_COMMAND_POOL_MISMATCH,
                "OBJTRACK", "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
                            " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, reinterpret_cast<uint64_t &>(commandPool));
        }
    } else {
        skipCall |= log_msg(
            mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
    }
    return skipCall;
}

static bool free_command_buffer(VkDevice device, VkCommandBuffer commandBuffer) {
    bool skipCall = false;
    auto cbItem = VkCommandBufferMap.find(reinterpret_cast<uint64_t>(commandBuffer));
    if (cbItem != VkCommandBufferMap.end()) {
        OBJTRACK_NODE *pNode = cbItem->second;
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, OBJTRACK_NONE, "OBJTRACK",
                            "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                            string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer),
                            numTotalObjs, numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
        delete pNode;
        VkCommandBufferMap.erase(cbItem);
    }
    return skipCall;
}
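// validate_command_buffer() above and validate_descriptor_set() below both
// enforce that an object is released back to the pool it was allocated from.
// A minimal repro of the mismatch error (illustrative, names assumed):
//
//     // cb was allocated from pool_a ...
//     vkFreeCommandBuffers(device, pool_b, 1, &cb);  // OBJTRACK_COMMAND_POOL_MISMATCH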
static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj,
                                 VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
            "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
            (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)device;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    pNewObjNode->parentObj = (uint64_t)descriptorPool;
    VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static bool validate_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
    bool skipCall = false;
    uint64_t object_handle = reinterpret_cast<uint64_t &>(descriptorSet);
    auto dsItem = VkDescriptorSetMap.find(object_handle);
    if (dsItem != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = dsItem->second;

        if (pNode->parentObj != reinterpret_cast<uint64_t &>(descriptorPool)) {
            skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
                                OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
                                "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
                                " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                                reinterpret_cast<uint64_t &>(descriptorSet), pNode->parentObj,
                                reinterpret_cast<uint64_t &>(descriptorPool));
        }
    } else {
        skipCall |= log_msg(
            mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
    }
    return skipCall;
}

static bool free_descriptor_set(VkDevice device, VkDescriptorSet descriptorSet) {
    bool skipCall = false;
    auto dsItem = VkDescriptorSetMap.find(reinterpret_cast<uint64_t &>(descriptorSet));
    if (dsItem != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = dsItem->second;
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType,
                            reinterpret_cast<uint64_t &>(descriptorSet), __LINE__, OBJTRACK_NONE, "OBJTRACK",
                            "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                            string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t &>(descriptorSet),
                            numTotalObjs, numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
        delete pNode;
        VkDescriptorSetMap.erase(dsItem);
    }
    return skipCall;
}
static void create_queue(VkDevice device, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {

    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *p_obj_node = NULL;
    auto queue_item = VkQueueMap.find(reinterpret_cast<uint64_t>(vkObj));
    if (queue_item == VkQueueMap.end()) {
        p_obj_node = new OBJTRACK_NODE;
        VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = p_obj_node;
        uint32_t objIndex = objTypeToIndex(objType);
        numObjs[objIndex]++;
        numTotalObjs++;
    } else {
        p_obj_node = queue_item->second;
    }
    p_obj_node->objType = objType;
    p_obj_node->belongsTo = reinterpret_cast<uint64_t>(device);
    p_obj_node->status = OBJSTATUS_NONE;
    p_obj_node->vkObj = reinterpret_cast<uint64_t>(vkObj);
}

static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
            __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            "SwapchainImage", (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)vkObj;
    pNewObjNode->parentObj = (uint64_t)swapchain;
    swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
}
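// Note that swapchain images are tracked only in swapchainImageMap and are
// deliberately excluded from numObjs/numTotalObjs: the application never
// destroys them individually, they go away with their parent swapchain (see
// explicit_DestroySwapchainKHR below).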
static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = objType;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    VkDeviceMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

//
// Non-auto-generated API functions called by generated code
//
VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                 VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    my_data->instance = *pInstance;
    initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);

    // Look for one or more debug report create info structures, and copy the
    // callback(s) for each one found (for use by vkDestroyInstance)
    layer_copy_tmp_callbacks(pCreateInfo->pNext, &my_data->num_tmp_callbacks, &my_data->tmp_dbg_create_infos,
                             &my_data->tmp_callbacks);

    my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
                                                        pCreateInfo->ppEnabledExtensionNames);

    init_object_tracker(my_data, pAllocator);
    createInstanceRegisterExtensions(pCreateInfo, *pInstance);

    create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);

    return result;
}
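// The temporary callbacks copied above let messages emitted during instance
// create/destroy reach the application. The application opts in by chaining a
// VkDebugReportCallbackCreateInfoEXT into VkInstanceCreateInfo::pNext -- an
// application-side sketch (callback name assumed):
//
//     VkDebugReportCallbackCreateInfoEXT dbg_ci = {};
//     dbg_ci.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
//     dbg_ci.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
//     dbg_ci.pfnCallback = MyDebugReportCallback;
//     instance_ci.pNext = &dbg_ci;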
void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
    get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);

    std::lock_guard<std::mutex> lock(global_lock);
    if (pProperties != NULL) {
        for (uint32_t i = 0; i < *pCount; i++) {
            queue_family_properties.emplace_back(pProperties[i]);
        }
    }
}

VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                               VkDevice *pDevice) {
    std::lock_guard<std::mutex> lock(global_lock);
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
        OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
        create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
    }

    return result;
}

VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                           VkPhysicalDevice *pPhysicalDevices) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
                          ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    lock.lock();
    if (result == VK_SUCCESS) {
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
            }
        }
    }
    lock.unlock();
    return result;
}
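// The locking pattern used by the explicit_* wrappers in the rest of this
// file: take global_lock while touching the tracking maps, release it around
// the down-chain dispatch so the driver call does not serialize against other
// threads' validation, then re-acquire it to record the results.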
void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
    std::unique_lock<std::mutex> lock(global_lock);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    lock.unlock();

    get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);

    lock.lock();

    create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
    addQueueInfo(queueNodeIndex, *pQueue);
}
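// Queues are retrieved rather than created, so vkGetDeviceQueue may hand back
// the same VkQueue repeatedly; create_queue() handles that by reusing any
// existing tracking node instead of double-counting the object.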
VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                            void **ppData) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    lock.unlock();
    if (skipCall == VK_TRUE)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);

    return result;
}

void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    lock.unlock();
    if (skipCall == VK_TRUE)
        return;

    get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
}

VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    std::unique_lock<std::mutex> lock(global_lock);
    validateQueueFlags(queue, "QueueBindSparse");

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
            validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
    }
    lock.unlock();

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
    return result;
}

VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                         VkCommandBuffer *pCommandBuffers) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    lock.unlock();

    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);

    lock.lock();
    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
        alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             pAllocateInfo->level);
    }
    lock.unlock();

    return result;
}

VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                         VkDescriptorSet *pDescriptorSets) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |=
        validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
    }
    lock.unlock();
    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
        }
        lock.unlock();
    }

    return result;
}

void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                 const VkCommandBuffer *pCommandBuffers) {
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        skipCall |= validate_command_buffer(device, commandPool, pCommandBuffers[i]);
    }

    lock.unlock();
    if (!skipCall) {
        get_dispatch_table(object_tracker_device_table_map, device)
            ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
    }

    lock.lock();
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        free_command_buffer(device, pCommandBuffers[i]);
    }
}
void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
    while (itr != swapchainImageMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        if (pNode->parentObj == reinterpret_cast<uint64_t &>(swapchain)) {
            delete pNode;
            swapchainImageMap.erase(itr++);
        } else {
            ++itr;
        }
    }
    destroy_swapchain_khr(device, swapchain);
    lock.unlock();

    get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
}

void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    lock.unlock();

    get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);

    lock.lock();
    destroy_device_memory(device, mem);
}

VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                     const VkDescriptorSet *pDescriptorSets) {
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    for (uint32_t i = 0; i < count; i++) {
        skipCall |= validate_descriptor_set(device, descriptorPool, pDescriptorSets[i]);
    }

    lock.unlock();
    if (!skipCall) {
        result = get_dispatch_table(object_tracker_device_table_map, device)
                     ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    }

    lock.lock();
    for (uint32_t i = 0; i < count; i++) {
        free_descriptor_set(device, pDescriptorSets[i]);
    }
    return result;
}

void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    lock.unlock();
    if (skipCall) {
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    lock.lock();
    std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
    while (itr != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(descriptorPool)) {
            destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
        }
    }
    destroy_descriptor_pool(device, descriptorPool);
    lock.unlock();
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}

void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    lock.unlock();
    if (skipCall) {
        return;
    }
    lock.lock();
    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
    // Remove this pool's cmdBuffers from our cmd buffer map.
    std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
    std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
    while (itr != VkCommandBufferMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(commandPool)) {
            skipCall |= validate_command_buffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
            free_command_buffer(device, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
        }
    }
    destroy_command_pool(device, commandPool);
    lock.unlock();
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
}

VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (pSwapchainImages != NULL) {
        lock.lock();
        for (uint32_t i = 0; i < *pCount; i++) {
            create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
        }
        lock.unlock();
    }
    return result;
}
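// In the pipeline-creation wrappers below, basePipelineHandle is validated
// with null_allowed = true because pipeline derivation is optional; an
// application only supplies it together with the derivative flag -- a sketch
// (illustrative):
//
//     graphics_ci.flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     graphics_ci.basePipelineHandle = base_pipeline;
//     graphics_ci.basePipelineIndex = -1;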
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                          VkPipeline *pPipelines) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
            }
            if (pCreateInfos[idx0].layout) {
                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
                                                     VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
                                                           VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                skipCall |=
                    validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
            }
        }
    }
    if (pipelineCache) {
        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    if (result == VK_SUCCESS) {
        for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
            create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
        }
    }
    lock.unlock();
    return result;
}

// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                         VkPipeline *pPipelines) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
            }
            if (pCreateInfos[idx0].layout) {
                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
                                                     VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
            }
            if (pCreateInfos[idx0].stage.module) {
                skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
            }
        }
    }
    if (pipelineCache) {
        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    if (result == VK_SUCCESS) {
        for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
            create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
        }
    }
    lock.unlock();
    return result;
}

} // namespace object_tracker