object_tracker.h revision 2e87e61dd76baebde60981fcca8b38fe3b1883d4
1/*
2 *
3 * Copyright (C) 2015 Valve Corporation
4 * Copyright (C) 2015 Google Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Author: Jon Ashburn <jon@lunarg.com>
25 * Author: Mark Lobodzinski <mark@lunarg.com>
26 * Author: Tobin Ehlis <tobin@lunarg.com>
27 */
28
29#include "vulkan/vk_layer.h"
30#include "vk_layer_extension_utils.h"
31#include "vk_enum_string_helper.h"
32
33// Object Tracker ERROR codes
34typedef enum _OBJECT_TRACK_ERROR
35{
36    OBJTRACK_NONE,                              // Used for INFO & other non-error messages
37    OBJTRACK_UNKNOWN_OBJECT,                    // Updating uses of object that's not in global object list
38    OBJTRACK_INTERNAL_ERROR,                    // Bug with data tracking within the layer
39    OBJTRACK_DESTROY_OBJECT_FAILED,             // Couldn't find object to be destroyed
40    OBJTRACK_OBJECT_LEAK,                       // OBJECT was not correctly freed/destroyed
41    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,             // Request for Object data in excess of max obj count
42    OBJTRACK_INVALID_OBJECT,                    // Object used that has never been created
43    OBJTRACK_DESCRIPTOR_POOL_MISMATCH,          // Descriptor Pools specified incorrectly
44    OBJTRACK_COMMAND_POOL_MISMATCH,             // Command Pools specified incorrectly
45} OBJECT_TRACK_ERROR;
46
47// Object Status -- used to track state of individual objects
48typedef VkFlags ObjectStatusFlags;
49typedef enum _ObjectStatusFlagBits
50{
51    OBJSTATUS_NONE                              = 0x00000000, // No status is set
52    OBJSTATUS_FENCE_IS_SUBMITTED                = 0x00000001, // Fence has been submitted
53    OBJSTATUS_VIEWPORT_BOUND                    = 0x00000002, // Viewport state object has been bound
54    OBJSTATUS_RASTER_BOUND                      = 0x00000004, // Raster state object has been bound
55    OBJSTATUS_COLOR_BLEND_BOUND                 = 0x00000008, // Color blend state object has been bound
56    OBJSTATUS_DEPTH_STENCIL_BOUND               = 0x00000010, // Depth-stencil state object has been bound
57    OBJSTATUS_GPU_MEM_MAPPED                    = 0x00000020, // Memory object is currently mapped
58} ObjectStatusFlagBits;
59
60typedef struct _OBJTRACK_NODE {
61    uint64_t                   vkObj;           // Object handle
62    VkDebugReportObjectTypeEXT objType;         // Object type identifier
63    ObjectStatusFlags          status;          // Object state
64    uint64_t                   parentObj;       // Parent object
65} OBJTRACK_NODE;
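// Illustrative sketch (not part of the layer source): status bits are OR'd into a
// node's status field when an event occurs and tested against a mask when the object
// is later used, which is what the set_status/validate_status helpers below do.
#if 0
static void example_track_fence_submission(OBJTRACK_NODE *pNode)
{
    // Mark the fence as submitted (e.g. when it is passed to vkQueueSubmit)
    pNode->status |= OBJSTATUS_FENCE_IS_SUBMITTED;

    // Later (e.g. before vkWaitForFences) verify the expected bit is present;
    // a mismatch would be reported through log_msg() with an OBJECT_TRACK_ERROR code.
    if ((pNode->status & OBJSTATUS_FENCE_IS_SUBMITTED) == 0) {
        // report the validation warning here
    }
}
#endif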
66
67// Prototypes for extension functions
68uint64_t objTrackGetObjectCount(VkDevice device);
69uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);
70
71// Func ptr typedefs
72typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
73typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);
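// Illustrative sketch (not part of the layer source): an application that enables the
// "OBJTRACK_EXTENSIONS" device extension could fetch these entry points through
// vkGetDeviceProcAddr; the proc-address strings are assumed here to match the function
// names declared above.
#if 0
static void example_query_object_counts(VkDevice device)
{
    OBJ_TRACK_GET_OBJECT_COUNT pfnGetObjectCount =
        (OBJ_TRACK_GET_OBJECT_COUNT) vkGetDeviceProcAddr(device, "objTrackGetObjectCount");
    OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT pfnGetObjectsOfTypeCount =
        (OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT) vkGetDeviceProcAddr(device, "objTrackGetObjectsOfTypeCount");
    if (pfnGetObjectCount && pfnGetObjectsOfTypeCount) {
        uint64_t total  = pfnGetObjectCount(device);
        uint64_t fences = pfnGetObjectsOfTypeCount(device, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT);
        (void) total; (void) fences;
    }
}
#endif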
74
75struct layer_data {
76    debug_report_data *report_data;
77    //TODO: put instance data here
78    VkDebugReportCallbackEXT   logging_callback;
79    bool wsi_enabled;
80    bool objtrack_extensions_enabled;
81
82    layer_data() :
83        report_data(nullptr),
84        logging_callback(VK_NULL_HANDLE),
85        wsi_enabled(false),
86        objtrack_extensions_enabled(false)
87    {};
88};
89
90struct instExts {
91    bool wsi_enabled;
92};
93
94static std::unordered_map<void *, struct instExts> instanceExtMap;
95static std::unordered_map<void*, layer_data *> layer_data_map;
96static device_table_map                        object_tracker_device_table_map;
97static instance_table_map                      object_tracker_instance_table_map;
98
99// We additionally need to validate image usage using a separate map
100// of swapchain-created images
101static unordered_map<uint64_t, OBJTRACK_NODE*> swapchainImageMap;
102
103static long long unsigned int object_track_index = 0;
104static int objLockInitialized = 0;
105static loader_platform_thread_mutex objLock;
106
107// Objects stored in a global map w/ struct containing basic info
108// unordered_map<const void*, OBJTRACK_NODE*> objMap;
109
110#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT+1)
111
112static uint64_t                         numObjs[NUM_OBJECT_TYPES]     = {0};
113static uint64_t                         numTotalObjs                  = 0;
114static VkQueueFamilyProperties         *queueInfo                     = NULL;
115static uint32_t                         queueCount                    = 0;
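// Minimal sketch, assuming the extension queries declared above are backed by these
// counters (the actual definitions live in the layer's source/generated code, not here):
#if 0
uint64_t objTrackGetObjectCount(VkDevice device)
{
    return numTotalObjs;
}
uint64_t objTrackGetObjectsOfTypeCount(VkDevice device, VkDebugReportObjectTypeEXT type)
{
    return numObjs[objTypeToIndex(type)];
}
#endif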
116
117template layer_data *get_my_data_ptr<layer_data>(
118        void *data_key, std::unordered_map<void *, layer_data *> &data_map);
119
120static inline const char* string_VkDebugReportObjectTypeEXT(VkDebugReportObjectTypeEXT input_value)
121{
122    switch ((VkDebugReportObjectTypeEXT)input_value)
123    {
124        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
125            return "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT";
126        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
127            return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT";
128        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
129            return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT";
130        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT:
131            return "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT";
132        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
133            return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT";
134        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
135            return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT";
136        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT:
137            return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT";
138        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT:
139            return "VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT";
140        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
141            return "VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT";
142        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
143            return "VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT";
144        case VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT:
145            return "VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT";
146        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
147            return "VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT";
148        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
149            return "VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT";
150        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
151            return "VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT";
152        case VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT:
153            return "VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT";
154        case VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT:
155            return "VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT";
156        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
157            return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT";
158        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT:
159            return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT";
160        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT:
161            return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT";
162        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
163            return "VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT";
164        case VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT:
165            return "VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT";
166        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
167            return "VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT";
168        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
169            return "VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT";
170        case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
171            return "VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT";
172        case VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT:
173            return "VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT";
174        case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
175            return "VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT";
176        default:
177            return "Unhandled VkObjectType";
178    }
179}
180
181//
182// Internal Object Tracker Functions
183//
184
185static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
186{
187    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
188    VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
189    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
190    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR");
191    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR");
192    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR");
193    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR");
194    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR");
195    my_device_data->wsi_enabled = false;
196    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
197        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
198            my_device_data->wsi_enabled = true;
199
200        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
201            my_device_data->objtrack_extensions_enabled = true;
202    }
203}
204
205static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreateInfo, VkInstance instance)
206{
207    uint32_t i;
208    VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
209    PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
210    pDisp->GetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
211    pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
212    pDisp->GetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
213    pDisp->GetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
214
215#ifdef VK_USE_PLATFORM_WIN32_KHR
216    pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR) gpa(instance, "vkCreateWin32SurfaceKHR");
217    pDisp->GetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
218#endif // VK_USE_PLATFORM_WIN32_KHR
219#ifdef VK_USE_PLATFORM_XCB_KHR
220    pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR) gpa(instance, "vkCreateXcbSurfaceKHR");
221    pDisp->GetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
222#endif // VK_USE_PLATFORM_XCB_KHR
223#ifdef VK_USE_PLATFORM_XLIB_KHR
224    pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR) gpa(instance, "vkCreateXlibSurfaceKHR");
225    pDisp->GetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
226#endif // VK_USE_PLATFORM_XLIB_KHR
227#ifdef VK_USE_PLATFORM_MIR_KHR
228    pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR) gpa(instance, "vkCreateMirSurfaceKHR");
229    pDisp->GetPhysicalDeviceMirPresentationSupportKHR = (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
230#endif // VK_USE_PLATFORM_MIR_KHR
231#ifdef VK_USE_PLATFORM_WAYLAND_KHR
232    pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR) gpa(instance, "vkCreateWaylandSurfaceKHR");
233    pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
234#endif //  VK_USE_PLATFORM_WAYLAND_KHR
235#ifdef VK_USE_PLATFORM_ANDROID_KHR
236    pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR) gpa(instance, "vkCreateAndroidSurfaceKHR");
237#endif // VK_USE_PLATFORM_ANDROID_KHR
238
239    instanceExtMap[pDisp].wsi_enabled = false;
240    for (i = 0; i < pCreateInfo->enabledExtensionNameCount; i++) {
241        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
242            instanceExtMap[pDisp].wsi_enabled = true;
243
244    }
245}
246
247// Indicate device or instance dispatch table type
248typedef enum _DispTableType
249{
250    DISP_TBL_TYPE_INSTANCE,
251    DISP_TBL_TYPE_DEVICE,
252} DispTableType;
253
254debug_report_data *mdd(const void* object)
255{
256    dispatch_key key = get_dispatch_key(object);
257    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
258    return my_data->report_data;
259}
260
261debug_report_data *mid(VkInstance object)
262{
263    dispatch_key key = get_dispatch_key(object);
264    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
265    return my_data->report_data;
266}
267
268// For each Queue's doubly linked-list of mem refs
269typedef struct _OT_MEM_INFO {
270    VkDeviceMemory       mem;
271    struct _OT_MEM_INFO *pNextMI;
272    struct _OT_MEM_INFO *pPrevMI;
273
274} OT_MEM_INFO;
275
276// Track Queue information
277typedef struct _OT_QUEUE_INFO {
278    OT_MEM_INFO                     *pMemRefList;
279    struct _OT_QUEUE_INFO           *pNextQI;
280    uint32_t                         queueNodeIndex;
281    VkQueue                          queue;
282    uint32_t                         refCount;
283} OT_QUEUE_INFO;
284
285// Global list of QueueInfo structures, one per queue
286static OT_QUEUE_INFO *g_pQueueInfo = NULL;
287
288// Convert an object type enum to an object type array index
289static uint32_t
290objTypeToIndex(
291    uint32_t objType)
292{
293    uint32_t index = objType;
294    return index;
295}
296
297// Add new queue to head of global queue list
298static void
299addQueueInfo(
300    uint32_t queueNodeIndex,
301    VkQueue  queue)
302{
303    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;
304
305    if (pQueueInfo != NULL) {
306        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
307        pQueueInfo->queue       = queue;
308        pQueueInfo->queueNodeIndex = queueNodeIndex;
309        pQueueInfo->pNextQI   = g_pQueueInfo;
310        g_pQueueInfo          = pQueueInfo;
311    }
312    else {
313        log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue), 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
314            "ERROR:  VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
315    }
316}
317
318// Destroy memRef lists and free all memory
319static void
320destroyQueueMemRefLists(void)
321{
322    OT_QUEUE_INFO *pQueueInfo    = g_pQueueInfo;
323    OT_QUEUE_INFO *pDelQueueInfo = NULL;
324    while (pQueueInfo != NULL) {
325        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
326        while (pMemInfo != NULL) {
327            OT_MEM_INFO *pDelMemInfo = pMemInfo;
328            pMemInfo = pMemInfo->pNextMI;
329            delete pDelMemInfo;
330        }
331        pDelQueueInfo = pQueueInfo;
332        pQueueInfo    = pQueueInfo->pNextQI;
333        delete pDelQueueInfo;
334    }
335    g_pQueueInfo = pQueueInfo;
336}
337
338static void
339setGpuQueueInfoState(
340    uint32_t  count,
341    void     *pData)
342{
343    queueCount = count;
344    queueInfo  = (VkQueueFamilyProperties*)realloc((void*)queueInfo, count * sizeof(VkQueueFamilyProperties));
345    if (queueInfo != NULL) {
346        memcpy(queueInfo, pData, count * sizeof(VkQueueFamilyProperties));
347    }
348}
349
350// Check Queue type flags for selected queue operations
351static void
352validateQueueFlags(
353    VkQueue     queue,
354    const char *function)
355{
356    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
357    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
358        pQueueInfo = pQueueInfo->pNextQI;
359    }
360    if (pQueueInfo != NULL) {
361        if ((queueInfo != NULL) && (queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
362            log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue), 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
363                "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
364        } else if (queueInfo == NULL) {
365            log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue), 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
366                "Attempting %s on a possibly non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not known", function);
367        }
368    }
369}
370
371/* TODO: Port to new type safety */
372#if 0
373// Check object status for selected flag state
374static VkBool32
375validate_status(
376    VkObject            dispatchable_object,
377    VkObject            vkObj,
378    VkObjectType        objType,
379    ObjectStatusFlags   status_mask,
380    ObjectStatusFlags   status_flag,
381    VkFlags             msg_flags,
382    OBJECT_TRACK_ERROR  error_code,
383    const char         *fail_msg)
384{
385    if (objMap.find(vkObj) != objMap.end()) {
386        OBJTRACK_NODE* pNode = objMap[vkObj];
387        if ((pNode->status & status_mask) != status_flag) {
388            char str[1024];
389            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
390                "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
391                 reinterpret_cast<uint64_t>(vkObj), fail_msg);
392            return VK_FALSE;
393        }
394        return VK_TRUE;
395    }
396    else {
397        // If we do not find it print an error
398        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
399            "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
400            reinterpret_cast<uint64_t>(vkObj), string_VkObjectType(objType));
401        return VK_FALSE;
402    }
403}
404#endif
405
406#include "vk_dispatch_table_helper.h"
407static void
408initObjectTracker(
409    layer_data *my_data,
410    const VkAllocationCallbacks *pAllocator)
411{
412    uint32_t report_flags = 0;
413    uint32_t debug_action = 0;
414    FILE *log_output = NULL;
415    const char *option_str;
416    // initialize ObjectTracker options
417    report_flags = getLayerOptionFlags("ObjectTrackerReportFlags", 0);
418    getLayerOptionEnum("ObjectTrackerDebugAction", (uint32_t *) &debug_action);
419
420    if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
421    {
422        option_str = getLayerOption("ObjectTrackerLogFilename");
423        log_output = getLayerLogOutput(option_str, "ObjectTracker");
424        VkDebugReportCallbackCreateInfoEXT dbgInfo;
425        memset(&dbgInfo, 0, sizeof(dbgInfo));
426        dbgInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
427        dbgInfo.pfnCallback = log_callback;
428        dbgInfo.pUserData = log_output;
429        dbgInfo.flags = report_flags;
430        layer_create_msg_callback(my_data->report_data, &dbgInfo, pAllocator, &my_data->logging_callback);
431    }
432
433    if (!objLockInitialized)
434    {
435        // TODO/TBD: Need to delete this mutex sometime.  How???  One
436        // suggestion is to call this during vkCreateInstance(), and then we
437        // can clean it up during vkDestroyInstance().  However, that requires
438        // that the layer have per-instance locks.  We need to come back and
439        // address this soon.
440        loader_platform_thread_create_mutex(&objLock);
441        objLockInitialized = 1;
442    }
443}
444
445//
446// Forward declares of generated routines
447//
448
449static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
450static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
451static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
452static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
453static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object);
454static VkBool32 validate_image(VkCommandBuffer dispatchable_object, VkImage object);
455static VkBool32 validate_command_buffer(VkQueue dispatchable_object, VkCommandBuffer object);
456static VkBool32 validate_descriptor_set(VkCommandBuffer dispatchable_object, VkDescriptorSet object);
457static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object);
458static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object);
459static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
460static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object);
461static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object);
462static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
463static void destroy_command_buffer(VkCommandBuffer dispatchable_object, VkCommandBuffer object);
464static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
465static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
466static void destroy_instance(VkInstance dispatchable_object, VkInstance object);
467static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
468static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType, ObjectStatusFlags status_flag);
469static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType, ObjectStatusFlags status_flag);
470#if 0
471static VkBool32 validate_status(VkDevice dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType,
472    ObjectStatusFlags status_mask, ObjectStatusFlags status_flag, VkFlags msg_flags, OBJECT_TRACK_ERROR  error_code,
473    const char         *fail_msg);
474#endif
475extern unordered_map<uint64_t, OBJTRACK_NODE*> VkPhysicalDeviceMap;
476extern unordered_map<uint64_t, OBJTRACK_NODE*> VkImageMap;
477extern unordered_map<uint64_t, OBJTRACK_NODE*> VkQueueMap;
478extern unordered_map<uint64_t, OBJTRACK_NODE*> VkDescriptorSetMap;
479extern unordered_map<uint64_t, OBJTRACK_NODE*> VkBufferMap;
480extern unordered_map<uint64_t, OBJTRACK_NODE*> VkFenceMap;
481extern unordered_map<uint64_t, OBJTRACK_NODE*> VkSemaphoreMap;
482extern unordered_map<uint64_t, OBJTRACK_NODE*> VkCommandPoolMap;
483extern unordered_map<uint64_t, OBJTRACK_NODE*> VkCommandBufferMap;
484extern unordered_map<uint64_t, OBJTRACK_NODE*> VkSwapchainKHRMap;
485
486static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object)
487{
488    if ((VkImageMap.find(reinterpret_cast<uint64_t>(object))        == VkImageMap.end()) &&
489        (swapchainImageMap.find(reinterpret_cast<uint64_t>(object)) == swapchainImageMap.end())) {
490        return log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, (uint64_t) object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
491            "Invalid VkImage Object %" PRIu64, reinterpret_cast<uint64_t>(object));
492    }
493    return VK_FALSE;
494}
495
496static VkBool32 validate_image(VkCommandBuffer dispatchable_object, VkImage object)
497{
498    if ((VkImageMap.find(reinterpret_cast<uint64_t>(object))        == VkImageMap.end()) &&
499        (swapchainImageMap.find(reinterpret_cast<uint64_t>(object)) == swapchainImageMap.end())) {
500        return log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, (uint64_t) object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
501            "Invalid VkImage Object %" PRIu64, reinterpret_cast<uint64_t>(object));
502    }
503    return VK_FALSE;
504}
505
506static VkBool32 validate_command_buffer(VkQueue dispatchable_object, VkCommandBuffer object)
507{
508    if (VkCommandBufferMap.find(reinterpret_cast<uint64_t>(object)) == VkCommandBufferMap.end()) {
509        return log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, reinterpret_cast<uint64_t>(object), 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
510            "Invalid VkCommandBuffer Object %" PRIu64, reinterpret_cast<uint64_t>(object));
511    }
512    return VK_FALSE;
513}
514
515static VkBool32 validate_descriptor_set(VkCommandBuffer dispatchable_object, VkDescriptorSet object)
516{
517    if (VkDescriptorSetMap.find(reinterpret_cast<uint64_t>(object)) == VkDescriptorSetMap.end()) {
518        return log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, (uint64_t) object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
519            "Invalid VkDescriptorSet Object %" PRIu64, reinterpret_cast<uint64_t>(object));
520    }
521    return VK_FALSE;
522}
523
524static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object)
525{
526    if (VkBufferMap.find(reinterpret_cast<uint64_t>(object)) == VkBufferMap.end()) {
527        return log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, (uint64_t) object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
528            "Invalid VkBuffer Object %" PRIu64, reinterpret_cast<uint64_t>(object));
529    }
530    return VK_FALSE;
531}
532
533static VkBool32 set_status(VkQueue dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType, ObjectStatusFlags status_flag)
534{
535    VkBool32 skipCall = VK_FALSE;
536    if (object != VK_NULL_HANDLE) {
537        if (VkFenceMap.find(reinterpret_cast<uint64_t>(object)) != VkFenceMap.end()) {
538            OBJTRACK_NODE* pNode = VkFenceMap[reinterpret_cast<uint64_t>(object)];
539            pNode->status |= status_flag;
540        }
541        else {
542            // If we do not find it print an error
543            skipCall |= log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, (uint64_t) object, 0, OBJTRACK_NONE, "OBJTRACK",
544                "Unable to set status for non-existent object 0x%" PRIxLEAST64 " of %s type",
545                reinterpret_cast<uint64_t>(object), string_VkDebugReportObjectTypeEXT(objType));
546        }
547    }
548    return skipCall;
549}
550
551static VkBool32 validate_semaphore(VkQueue dispatchable_object, VkSemaphore object)
552{
553    if (VkSemaphoreMap.find(reinterpret_cast<uint64_t>(object)) == VkSemaphoreMap.end()) {
554        return log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, (uint64_t) object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
555            "Invalid VkSemaphore Object %" PRIu64, reinterpret_cast<uint64_t>(object));
556    }
557    return VK_FALSE;
558}
559
560static VkBool32 validate_command_buffer(VkDevice dispatchable_object, VkCommandBuffer object)
561{
562    if (VkCommandBufferMap.find(reinterpret_cast<uint64_t>(object)) == VkCommandBufferMap.end()) {
563        return log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, reinterpret_cast<uint64_t>(object), 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
564            "Invalid VkCommandBuffer Object %" PRIu64, reinterpret_cast<uint64_t>(object));
565    }
566    return VK_FALSE;
567}
568
569static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType)
570{
571    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), 0, OBJTRACK_NONE, "OBJTRACK",
572        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
573        reinterpret_cast<uint64_t>(vkObj));
574
575    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
576    pNewObjNode->objType = objType;
577    pNewObjNode->status  = OBJSTATUS_NONE;
578    pNewObjNode->vkObj  = reinterpret_cast<uint64_t>(vkObj);
579    VkPhysicalDeviceMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
580    uint32_t objIndex = objTypeToIndex(objType);
581    numObjs[objIndex]++;
582    numTotalObjs++;
583}
584
585static void create_surface_khr(VkInstance instance, VkSurfaceKHR surface, VkDebugReportObjectTypeEXT objType)
586{
587    // TODO: Add tracking of surface objects
588}
589
590static void destroy_surface_khr(VkInstance instance, VkSurfaceKHR surface)
591{
592    // TODO: Add tracking of surface objects
593}
594
595static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj, VkDebugReportObjectTypeEXT objType)
596{
597    log_msg(mdd(device), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), 0, OBJTRACK_NONE, "OBJTRACK",
598        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
599        reinterpret_cast<uint64_t>(vkObj));
600
601    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
602    pNewObjNode->objType   = objType;
603    pNewObjNode->status    = OBJSTATUS_NONE;
604    pNewObjNode->vkObj     = reinterpret_cast<uint64_t>(vkObj);
605    pNewObjNode->parentObj = (uint64_t) commandPool;
606    VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
607    uint32_t objIndex = objTypeToIndex(objType);
608    numObjs[objIndex]++;
609    numTotalObjs++;
610}
611
612static void free_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer)
613{
614    uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
615    if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
616        OBJTRACK_NODE* pNode = VkCommandBufferMap[(uint64_t)commandBuffer];
617
618       if (pNode->parentObj != reinterpret_cast<uint64_t>(commandPool)) {
619           log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, 0, OBJTRACK_COMMAND_POOL_MISMATCH, "OBJTRACK",
620               "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64 " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
621               reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, reinterpret_cast<uint64_t>(commandPool));
622       } else {
623
624            uint32_t objIndex = objTypeToIndex(pNode->objType);
625            assert(numTotalObjs > 0);
626            numTotalObjs--;
627            assert(numObjs[objIndex] > 0);
628            numObjs[objIndex]--;
629            log_msg(mdd(device), VK_DEBUG_REPORT_INFO_BIT_EXT, pNode->objType, object_handle, 0, OBJTRACK_NONE, "OBJTRACK",
630               "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
631                string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer), numTotalObjs, numObjs[objIndex],
632                string_VkDebugReportObjectTypeEXT(pNode->objType));
633            delete pNode;
634            VkCommandBufferMap.erase(object_handle);
635        }
636    } else {
637        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, object_handle, 0, OBJTRACK_NONE, "OBJTRACK",
638            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
639           object_handle);
640    }
641}
642
643static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj, VkDebugReportObjectTypeEXT objType)
644{
645    log_msg(mdd(device), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), 0, OBJTRACK_NONE, "OBJTRACK",
646        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
647        reinterpret_cast<uint64_t>(vkObj));
648
649    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
650    pNewObjNode->objType   = objType;
651    pNewObjNode->status    = OBJSTATUS_NONE;
652    pNewObjNode->vkObj     = reinterpret_cast<uint64_t>(vkObj);
653    pNewObjNode->parentObj = (uint64_t) descriptorPool;
654    VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
655    uint32_t objIndex = objTypeToIndex(objType);
656    numObjs[objIndex]++;
657    numTotalObjs++;
658}
659
660static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet)
661{
662    uint64_t object_handle = reinterpret_cast<uint64_t>(descriptorSet);
663    if (VkDescriptorSetMap.find(object_handle) != VkDescriptorSetMap.end()) {
664        OBJTRACK_NODE* pNode = VkDescriptorSetMap[(uint64_t)descriptorSet];
665
666        if (pNode->parentObj != reinterpret_cast<uint64_t>(descriptorPool)) {
667            log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, 0, OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
668                "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64 " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
669                reinterpret_cast<uint64_t>(descriptorSet), pNode->parentObj, reinterpret_cast<uint64_t>(descriptorPool));
670        } else {
671            uint32_t objIndex = objTypeToIndex(pNode->objType);
672            assert(numTotalObjs > 0);
673            numTotalObjs--;
674            assert(numObjs[objIndex] > 0);
675            numObjs[objIndex]--;
676            log_msg(mdd(device), VK_DEBUG_REPORT_INFO_BIT_EXT, pNode->objType, object_handle, 0, OBJTRACK_NONE, "OBJTRACK",
677               "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
678                string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(descriptorSet), numTotalObjs, numObjs[objIndex],
679                string_VkDebugReportObjectTypeEXT(pNode->objType));
680            delete pNode;
681            VkDescriptorSetMap.erase(object_handle);
682        }
683    } else {
684        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, object_handle, 0, OBJTRACK_NONE, "OBJTRACK",
685            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
686           object_handle);
687    }
688}
689
690static void create_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR vkObj, VkDebugReportObjectTypeEXT objType)
691{
692    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, (uint64_t) vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
693        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
694        reinterpret_cast<uint64_t>(vkObj));
695
696    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
697    pNewObjNode->objType = objType;
698    pNewObjNode->status  = OBJSTATUS_NONE;
699    pNewObjNode->vkObj  = (uint64_t) vkObj;
700    VkSwapchainKHRMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
701    uint32_t objIndex = objTypeToIndex(objType);
702    numObjs[objIndex]++;
703    numTotalObjs++;
704}
705static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType)
706{
707    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), 0, OBJTRACK_NONE, "OBJTRACK",
708        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
709        reinterpret_cast<uint64_t>(vkObj));
710
711    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
712    pNewObjNode->objType = objType;
713    pNewObjNode->status  = OBJSTATUS_NONE;
714    pNewObjNode->vkObj  = reinterpret_cast<uint64_t>(vkObj);
715    VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
716    uint32_t objIndex = objTypeToIndex(objType);
717    numObjs[objIndex]++;
718    numTotalObjs++;
719}
720static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain)
721{
722    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t) vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
723        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, "SwapchainImage",
724        reinterpret_cast<uint64_t>(vkObj));
725
726    OBJTRACK_NODE* pNewObjNode             = new OBJTRACK_NODE;
727    pNewObjNode->objType                   = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
728    pNewObjNode->status                    = OBJSTATUS_NONE;
729    pNewObjNode->vkObj                     = (uint64_t) vkObj;
730    pNewObjNode->parentObj                 = (uint64_t) swapchain;
731    swapchainImageMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
732}
733
734static void destroy_swapchain(VkDevice dispatchable_object, VkSwapchainKHR object)
735{
736    if (VkSwapchainKHRMap.find(reinterpret_cast<uint64_t>(object)) != VkSwapchainKHRMap.end()) {
737        OBJTRACK_NODE* pNode = VkSwapchainKHRMap[reinterpret_cast<uint64_t>(object)];
738        uint32_t objIndex = objTypeToIndex(pNode->objType);
739        assert(numTotalObjs > 0);
740        numTotalObjs--;
741        assert(numObjs[objIndex] > 0);
742        numObjs[objIndex]--;
743        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, pNode->objType, (uint64_t) object, 0, OBJTRACK_NONE, "OBJTRACK",
744           "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
745            string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t) object, numTotalObjs, numObjs[objIndex],
746            string_VkDebugReportObjectTypeEXT(pNode->objType));
747        delete pNode;
748        VkSwapchainKHRMap.erase(reinterpret_cast<uint64_t>(object));
749    } else {
750        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, (uint64_t) object, 0, OBJTRACK_NONE, "OBJTRACK",
751            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
752           reinterpret_cast<uint64_t>(object));
753    }
754}
755//
756// Non-auto-generated API functions called by generated code
757//
758VkResult
759explicit_CreateInstance(
760    const VkInstanceCreateInfo *pCreateInfo,
761    const VkAllocationCallbacks     * pAllocator,
762    VkInstance                 * pInstance)
763{
764
765    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);
766    VkResult result = pInstanceTable->CreateInstance(pCreateInfo, pAllocator, pInstance);
767
768    if (result == VK_SUCCESS) {
769        layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
770        my_data->report_data = debug_report_create_instance(
771                                   pInstanceTable,
772                                   *pInstance,
773                                   pCreateInfo->enabledExtensionNameCount,
774                                   pCreateInfo->ppEnabledExtensionNames);
775        createInstanceRegisterExtensions(pCreateInfo, *pInstance);
776
777        initObjectTracker(my_data, pAllocator);
778        create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);
779    }
780    return result;
781}
782
783void
784explicit_GetPhysicalDeviceQueueFamilyProperties(
785    VkPhysicalDevice                 gpu,
786    uint32_t*                        pCount,
787    VkQueueFamilyProperties*         pProperties)
788{
789    get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);
790
791    loader_platform_thread_lock_mutex(&objLock);
792    if (pProperties != NULL)
793        setGpuQueueInfoState(*pCount, pProperties);
794    loader_platform_thread_unlock_mutex(&objLock);
795}
796
797VkResult
798explicit_CreateDevice(
799    VkPhysicalDevice         gpu,
800    const VkDeviceCreateInfo *pCreateInfo,
801    const VkAllocationCallbacks   *pAllocator,
802    VkDevice                 *pDevice)
803{
804    loader_platform_thread_lock_mutex(&objLock);
805    VkLayerDispatchTable *pDeviceTable = get_dispatch_table(object_tracker_device_table_map, *pDevice);
806    VkResult result = pDeviceTable->CreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
807    if (result == VK_SUCCESS) {
808        layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
809        layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
810        my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
811        create_device(*pDevice, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
812        createDeviceRegisterExtensions(pCreateInfo, *pDevice);
813    }
814
815    loader_platform_thread_unlock_mutex(&objLock);
816    return result;
817}
818
819VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices)
820{
821    VkBool32 skipCall = VK_FALSE;
822    loader_platform_thread_lock_mutex(&objLock);
823    skipCall |= validate_instance(instance, instance);
824    loader_platform_thread_unlock_mutex(&objLock);
825    if (skipCall)
826        return VK_ERROR_VALIDATION_FAILED_EXT;
827    VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
828    loader_platform_thread_lock_mutex(&objLock);
829    if (result == VK_SUCCESS) {
830        if (pPhysicalDevices) {
831            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
832                create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
833            }
834        }
835    }
836    loader_platform_thread_unlock_mutex(&objLock);
837    return result;
838}
839
840void
841explicit_GetDeviceQueue(
842    VkDevice  device,
843    uint32_t  queueNodeIndex,
844    uint32_t  queueIndex,
845    VkQueue  *pQueue)
846{
847    loader_platform_thread_lock_mutex(&objLock);
848    validate_device(device, device);
849    loader_platform_thread_unlock_mutex(&objLock);
850
851    get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
852
853    loader_platform_thread_lock_mutex(&objLock);
854    addQueueInfo(queueNodeIndex, *pQueue);
855    create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
856    loader_platform_thread_unlock_mutex(&objLock);
857}
858
859VkResult
860explicit_MapMemory(
861    VkDevice         device,
862    VkDeviceMemory   mem,
863    VkDeviceSize     offset,
864    VkDeviceSize     size,
865    VkFlags          flags,
866    void           **ppData)
867{
868    VkBool32 skipCall = VK_FALSE;
869    loader_platform_thread_lock_mutex(&objLock);
870    skipCall |= set_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
871    skipCall |= validate_device(device, device);
872    loader_platform_thread_unlock_mutex(&objLock);
873    if (skipCall == VK_TRUE)
874        return VK_ERROR_VALIDATION_FAILED_EXT;
875
876    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
877
878    return result;
879}
880
881void
882explicit_UnmapMemory(
883    VkDevice       device,
884    VkDeviceMemory mem)
885{
886    VkBool32 skipCall = VK_FALSE;
887    loader_platform_thread_lock_mutex(&objLock);
888    skipCall |= reset_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
889    skipCall |= validate_device(device, device);
890    loader_platform_thread_unlock_mutex(&objLock);
891    if (skipCall == VK_TRUE)
892        return;
893
894    get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
895}
896
897VkResult
898explicit_QueueBindSparse(
899    VkQueue                       queue,
900    uint32_t                                    bindInfoCount,
901    const VkBindSparseInfo*                     pBindInfo,
902    VkFence                                     fence)
903{
904    loader_platform_thread_lock_mutex(&objLock);
905    validateQueueFlags(queue, "QueueBindSparse");
906
907    for (uint32_t i = 0; i < bindInfoCount; i++) {
908        for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
909            validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer);
910        for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
911            validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image);
912        for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
913            validate_image(queue, pBindInfo[i].pImageBinds[j].image);
914    }
915
916    loader_platform_thread_unlock_mutex(&objLock);
917
918    VkResult result = get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
919    return result;
920}
921
922VkResult
923explicit_AllocateCommandBuffers(
924    VkDevice                           device,
925    const VkCommandBufferAllocateInfo *pAllocateInfo,
926    VkCommandBuffer*                   pCommandBuffers)
927{
928    VkBool32 skipCall = VK_FALSE;
929    loader_platform_thread_lock_mutex(&objLock);
930    skipCall |= validate_device(device, device);
931    skipCall |= validate_command_pool(device, pAllocateInfo->commandPool);
932    loader_platform_thread_unlock_mutex(&objLock);
933
934    if (skipCall) {
935        return VK_ERROR_VALIDATION_FAILED_EXT;
936    }
937
938    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(
939        device, pAllocateInfo, pCommandBuffers);
940
941    loader_platform_thread_lock_mutex(&objLock);
942    for (uint32_t i = 0; i < pAllocateInfo->bufferCount; i++) {
943        alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT);
944    }
945    loader_platform_thread_unlock_mutex(&objLock);
946
947    return result;
948}
949
950VkResult
951explicit_AllocateDescriptorSets(
952    VkDevice                           device,
953    const VkDescriptorSetAllocateInfo *pAllocateInfo,
954    VkDescriptorSet                   *pDescriptorSets)
955{
956    VkBool32 skipCall = VK_FALSE;
957    loader_platform_thread_lock_mutex(&objLock);
958    skipCall |= validate_device(device, device);
959    skipCall |= validate_descriptor_pool(device, pAllocateInfo->descriptorPool);
960    for (uint32_t i = 0; i < pAllocateInfo->setLayoutCount; i++) {
961        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i]);
962    }
963    loader_platform_thread_unlock_mutex(&objLock);
964    if (skipCall)
965        return VK_ERROR_VALIDATION_FAILED_EXT;
966
967    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(
968        device, pAllocateInfo, pDescriptorSets);
969
970    loader_platform_thread_lock_mutex(&objLock);
971    for (uint32_t i = 0; i < pAllocateInfo->setLayoutCount; i++) {
972        alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i], VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
973    }
974    loader_platform_thread_unlock_mutex(&objLock);
975
976    return result;
977}
978
979void
980explicit_FreeCommandBuffers(
981    VkDevice               device,
982    VkCommandPool          commandPool,
983    uint32_t               commandBufferCount,
984    const VkCommandBuffer *pCommandBuffers)
985{
986    loader_platform_thread_lock_mutex(&objLock);
987    validate_command_pool(device, commandPool);
988    validate_device(device, device);
989    loader_platform_thread_unlock_mutex(&objLock);
990
991    get_dispatch_table(object_tracker_device_table_map, device)->FreeCommandBuffers(device,
992        commandPool, commandBufferCount, pCommandBuffers);
993
994    loader_platform_thread_lock_mutex(&objLock);
995    for (uint32_t i = 0; i < commandBufferCount; i++)
996    {
997        free_command_buffer(device, commandPool, *pCommandBuffers);
998        pCommandBuffers++;
999    }
1000    loader_platform_thread_unlock_mutex(&objLock);
1001}
1002
1003void
1004explicit_DestroySwapchainKHR(
1005    VkDevice                    device,
1006    VkSwapchainKHR              swapchain,
1007    const VkAllocationCallbacks *pAllocator)
1008{
1009    loader_platform_thread_lock_mutex(&objLock);
1010    // A swapchain's images are implicitly deleted when the swapchain is deleted.
1011    // Remove this swapchain's images from our map of such images.
1012    unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = swapchainImageMap.begin();
1013    while (itr != swapchainImageMap.end()) {
1014        OBJTRACK_NODE* pNode = (*itr).second;
1015        if (pNode->parentObj == reinterpret_cast<uint64_t>(swapchain)) {
1016           swapchainImageMap.erase(itr++);
1017        } else {
1018           ++itr;
1019        }
1020    }
1021    destroy_swapchain(device, swapchain);
1022    loader_platform_thread_unlock_mutex(&objLock);
1023
1024    get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
1025}
1026
1027void
1028explicit_FreeMemory(
1029    VkDevice       device,
1030    VkDeviceMemory mem,
1031    const VkAllocationCallbacks* pAllocator)
1032{
1033    loader_platform_thread_lock_mutex(&objLock);
1034    validate_device(device, device);
1035    loader_platform_thread_unlock_mutex(&objLock);
1036
1037    get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);
1038
1039    loader_platform_thread_lock_mutex(&objLock);
1040    destroy_device_memory(device, mem);
1041    loader_platform_thread_unlock_mutex(&objLock);
1042}
1043
1044VkResult
1045explicit_FreeDescriptorSets(
1046    VkDevice               device,
1047    VkDescriptorPool       descriptorPool,
1048    uint32_t               count,
1049    const VkDescriptorSet *pDescriptorSets)
1050{
1051    loader_platform_thread_lock_mutex(&objLock);
1052    validate_descriptor_pool(device, descriptorPool);
1053    validate_device(device, device);
1054    loader_platform_thread_unlock_mutex(&objLock);
1055    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
1056
1057    loader_platform_thread_lock_mutex(&objLock);
1058    for (uint32_t i=0; i<count; i++)
1059    {
1060        free_descriptor_set(device, descriptorPool, *pDescriptorSets++);
1061    }
1062    loader_platform_thread_unlock_mutex(&objLock);
1063    return result;
1064}
1065
1066void
1067explicit_DestroyDescriptorPool(
1068    VkDevice                     device,
1069    VkDescriptorPool             descriptorPool,
1070    const VkAllocationCallbacks *pAllocator)
1071{
1072    VkBool32 skipCall = VK_FALSE;
1073    loader_platform_thread_lock_mutex(&objLock);
1074    skipCall |= validate_device(device, device);
1075    skipCall |= validate_descriptor_pool(device, descriptorPool);
1076    loader_platform_thread_unlock_mutex(&objLock);
1077    if (skipCall) {
1078        return;
1079    }
1080    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
1081    // Remove this pool's descriptor sets from our descriptorSet map.
1082    loader_platform_thread_lock_mutex(&objLock);
1083    unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = VkDescriptorSetMap.begin();
1084    while (itr != VkDescriptorSetMap.end()) {
1085        OBJTRACK_NODE* pNode = (*itr).second;
1086        auto del_itr = itr++;
1087        if (pNode->parentObj == reinterpret_cast<uint64_t>(descriptorPool)) {
1088            destroy_descriptor_set(device, reinterpret_cast<VkDescriptorSet>((*del_itr).first));
1089        }
1090    }
1091    destroy_descriptor_pool(device, descriptorPool);
1092    loader_platform_thread_unlock_mutex(&objLock);
1093    get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
1094}
1095
1096void
1097explicit_DestroyCommandPool(
1098    VkDevice                     device,
1099    VkCommandPool                commandPool,
1100    const VkAllocationCallbacks *pAllocator)
1101{
1102    VkBool32 skipCall = VK_FALSE;
1103    loader_platform_thread_lock_mutex(&objLock);
1104    skipCall |= validate_device(device, device);
1105    skipCall |= validate_command_pool(device, commandPool);
1106    loader_platform_thread_unlock_mutex(&objLock);
1107    if (skipCall) {
1108        return;
1109    }
1110    loader_platform_thread_lock_mutex(&objLock);
1111    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
1112    // Remove this pool's cmdBuffers from our cmd buffer map.
1113    unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = VkCommandBufferMap.begin();
1114    unordered_map<uint64_t, OBJTRACK_NODE*>::iterator del_itr;
1115    while (itr != VkCommandBufferMap.end()) {
1116        OBJTRACK_NODE* pNode = (*itr).second;
1117        del_itr = itr++;
1118        if (pNode->parentObj == reinterpret_cast<uint64_t>(commandPool)) {
1119            destroy_command_buffer(reinterpret_cast<VkCommandBuffer>((*del_itr).first),
1120                                   reinterpret_cast<VkCommandBuffer>((*del_itr).first));
1121        }
1122    }
1123    destroy_command_pool(device, commandPool);
1124    loader_platform_thread_unlock_mutex(&objLock);
1125    get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
1126}
1127
1128VkResult
1129explicit_GetSwapchainImagesKHR(
1130    VkDevice        device,
1131    VkSwapchainKHR  swapchain,
1132    uint32_t       *pCount,
1133    VkImage        *pSwapchainImages)
1134{
1135    VkBool32 skipCall = VK_FALSE;
1136    loader_platform_thread_lock_mutex(&objLock);
1137    skipCall |= validate_device(device, device);
1138    loader_platform_thread_unlock_mutex(&objLock);
1139    if (skipCall)
1140        return VK_ERROR_VALIDATION_FAILED_EXT;
1141
1142    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
1143
1144    if (pSwapchainImages != NULL) {
1145        loader_platform_thread_lock_mutex(&objLock);
1146        for (uint32_t i = 0; i < *pCount; i++) {
1147            create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
1148        }
1149        loader_platform_thread_unlock_mutex(&objLock);
1150    }
1151    return result;
1152}
1153
1154