object_tracker.h revision 884f0b6ae54f8a82336692498fdcf1a318b5e39c
1/*
2 *
3 * Copyright (C) 2015 Valve Corporation
4 * Copyright (C) 2015 Google Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Author: Jon Ashburn <jon@lunarg.com>
25 * Author: Mark Lobodzinski <mark@lunarg.com>
26 * Author: Tobin Ehlis <tobin@lunarg.com>
27 */
28
#include <cstdlib>
#include <cstring>
#include <new>

#include "vulkan/vk_layer.h"
#include "vk_layer_extension_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_table.h"
33
// Object Tracker ERROR codes -- passed as the msgCode argument of log_msg()
// when this layer reports a tracking problem.
typedef enum _OBJECT_TRACK_ERROR
{
    OBJTRACK_NONE,                              // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,                    // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,                    // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,             // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_LEAK,                       // OBJECT was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,             // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_OBJECT,                    // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH,          // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,             // Command Pools specified incorrectly
} OBJECT_TRACK_ERROR;
47
// Object Status -- used to track state of individual objects.
// Stored as a bitmask in OBJTRACK_NODE::status.
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits
{
    OBJSTATUS_NONE                              = 0x00000000, // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED                = 0x00000001, // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND                    = 0x00000002, // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND                      = 0x00000004, // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND                 = 0x00000008, // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND               = 0x00000010, // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED                    = 0x00000020, // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY          = 0x00000040, // Command Buffer is of type SECONDARY
} ObjectStatusFlagBits;
61
// Bookkeeping record kept per tracked Vulkan object.
typedef struct _OBJTRACK_NODE {
    uint64_t                   vkObj;           // Object handle
    VkDebugReportObjectTypeEXT objType;         // Object type identifier
    ObjectStatusFlags          status;          // Object state (ObjectStatusFlagBits mask)
    uint64_t                   parentObj;       // Parent object (e.g. pool the object was allocated from)
} OBJTRACK_NODE;

// prototype for extension functions
uint64_t objTrackGetObjectCount(VkDevice device);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);

// Func ptr typedefs for the extension entry points above
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);
76
// Per-dispatchable-object layer state, keyed by dispatch key in layer_data_map.
struct layer_data {
    debug_report_data *report_data;              // Debug-report state for this instance/device
    //TODO: put instance data here
    VkDebugReportCallbackEXT   logging_callback; // Callback created by initObjectTracker() when logging is enabled
    bool wsi_enabled;                            // Swapchain/surface extension was requested at create time
    bool objtrack_extensions_enabled;            // "OBJTRACK_EXTENSIONS" pseudo-extension requested

    layer_data() :
        report_data(nullptr),
        logging_callback(VK_NULL_HANDLE),
        wsi_enabled(false),
        objtrack_extensions_enabled(false)
    {};
};

// Instance-level extension state, keyed by instance dispatch table pointer.
struct instExts {
    bool wsi_enabled;                            // VK_KHR_surface requested at vkCreateInstance time
};

static std::unordered_map<void *, struct instExts> instanceExtMap;
static std::unordered_map<void*, layer_data *> layer_data_map;
static device_table_map                        object_tracker_device_table_map;
static instance_table_map                      object_tracker_instance_table_map;

// We need additionally validate image usage using a separate map
// of swapchain-created images
static unordered_map<uint64_t, OBJTRACK_NODE*> swapchainImageMap;

// Monotonically increasing index stamped into each CREATE log message.
static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
// Mutex created lazily by initObjectTracker(); never destroyed (see TODO there).
static loader_platform_thread_mutex objLock;

// Objects stored in a global map w/ struct containing basic info
// unordered_map<const void*, OBJTRACK_NODE*> objMap;

// One counter slot per debug-report object type.
#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT+1)

static uint64_t                         numObjs[NUM_OBJECT_TYPES]     = {0};
static uint64_t                         numTotalObjs                  = 0;
static VkQueueFamilyProperties         *queueInfo                     = NULL;  // Filled by setGpuQueueInfoState()
static uint32_t                         queueCount                    = 0;

// Explicit instantiation of the layer-data accessor for this layer's map type.
template layer_data *get_my_data_ptr<layer_data>(
        void *data_key, std::unordered_map<void *, layer_data *> &data_map);
121
122static inline const char* string_VkDebugReportObjectTypeEXT(VkDebugReportObjectTypeEXT input_value)
123{
124    switch ((VkDebugReportObjectTypeEXT)input_value)
125    {
126        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
127            return "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT";
128        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
129            return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT";
130        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
131            return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT";
132        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT:
133            return "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT";
134        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
135            return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT";
136        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
137            return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT";
138        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT:
139            return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT";
140        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT:
141            return "VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT";
142        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
143            return "VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT";
144        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
145            return "VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT";
146        case VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT:
147            return "VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT";
148        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
149            return "VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT";
150        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
151            return "VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT";
152        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
153            return "VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT";
154        case VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT:
155            return "VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT";
156        case VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT:
157            return "VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT";
158        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
159            return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT";
160        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT:
161            return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT";
162        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT:
163            return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT";
164        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
165            return "VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT";
166        case VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT:
167            return "VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT";
168        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
169            return "VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT";
170        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
171            return "VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT";
172        case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
173            return "VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT";
174        case VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT:
175            return "VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT";
176        case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
177            return "VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT";
178        default:
179            return "Unhandled VkObjectType";
180    }
181}
182
183//
184// Internal Object Tracker Functions
185//
186
187static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
188{
189    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
190    VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
191    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
192    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR");
193    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR");
194    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR");
195    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR");
196    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR");
197    my_device_data->wsi_enabled = false;
198    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
199        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
200            my_device_data->wsi_enabled = true;
201
202        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
203            my_device_data->objtrack_extensions_enabled = true;
204    }
205}
206
207static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreateInfo, VkInstance instance)
208{
209    uint32_t i;
210    VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
211    PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
212    pDisp->GetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
213    pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
214    pDisp->GetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
215    pDisp->GetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
216
217#if VK_USE_PLATFORM_WIN32_KHR
218    pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR) gpa(instance, "vkCreateWin32SurfaceKHR");
219    pDisp->GetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
220#endif // VK_USE_PLATFORM_WIN32_KHR
221#ifdef VK_USE_PLATFORM_XCB_KHR
222    pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR) gpa(instance, "vkCreateXcbSurfaceKHR");
223    pDisp->GetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
224#endif // VK_USE_PLATFORM_XCB_KHR
225#ifdef VK_USE_PLATFORM_XLIB_KHR
226    pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR) gpa(instance, "vkCreateXlibSurfaceKHR");
227    pDisp->GetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
228#endif // VK_USE_PLATFORM_XLIB_KHR
229#ifdef VK_USE_PLATFORM_MIR_KHR
230    pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR) gpa(instance, "vkCreateMirSurfaceKHR");
231    pDisp->GetPhysicalDeviceMirPresentationSupportKHR = (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
232#endif // VK_USE_PLATFORM_MIR_KHR
233#ifdef VK_USE_PLATFORM_WAYLAND_KHR
234    pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR) gpa(instance, "vkCreateWaylandSurfaceKHR");
235    pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
236#endif //  VK_USE_PLATFORM_WAYLAND_KHR
237#ifdef VK_USE_PLATFORM_ANDROID_KHR
238    pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR) gpa(instance, "vkCreateAndroidSurfaceKHR");
239#endif // VK_USE_PLATFORM_ANDROID_KHR
240
241    instanceExtMap[pDisp].wsi_enabled = false;
242    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
243        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
244            instanceExtMap[pDisp].wsi_enabled = true;
245
246    }
247}
248
// Indicate device or instance dispatch table type
// (used to select which dispatch-table map a lookup should go through).
typedef enum _DispTableType
{
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;
255
256debug_report_data *mdd(const void* object)
257{
258    dispatch_key key = get_dispatch_key(object);
259    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
260    return my_data->report_data;
261}
262
263debug_report_data *mid(VkInstance object)
264{
265    dispatch_key key = get_dispatch_key(object);
266    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
267    return my_data->report_data;
268}
269
// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory       mem;                // Memory object referenced by the queue
    struct _OT_MEM_INFO *pNextMI;            // Next mem-ref in the list
    struct _OT_MEM_INFO *pPrevMI;            // Previous mem-ref in the list

} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO                     *pMemRefList;    // Head of this queue's mem-ref list
    struct _OT_QUEUE_INFO           *pNextQI;        // Next queue record in the global list
    uint32_t                         queueNodeIndex; // Index into queueInfo[] (queue family)
    VkQueue                          queue;          // The queue being tracked
    uint32_t                         refCount;       // Reference count for this record
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;
289
// Map an object-type enum value to its slot in the per-type counter arrays.
// The debug-report enum values are dense, so the mapping is the identity.
static uint32_t objTypeToIndex(uint32_t objType)
{
    return objType;
}
298
299// Add new queue to head of global queue list
300static void
301addQueueInfo(
302    uint32_t queueNodeIndex,
303    VkQueue  queue)
304{
305    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;
306
307    if (pQueueInfo != NULL) {
308        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
309        pQueueInfo->queue       = queue;
310        pQueueInfo->queueNodeIndex = queueNodeIndex;
311        pQueueInfo->pNextQI   = g_pQueueInfo;
312        g_pQueueInfo          = pQueueInfo;
313    }
314    else {
315        log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
316            "ERROR:  VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
317    }
318}
319
320// Destroy memRef lists and free all memory
321static void
322destroyQueueMemRefLists(void)
323{
324    OT_QUEUE_INFO *pQueueInfo    = g_pQueueInfo;
325    OT_QUEUE_INFO *pDelQueueInfo = NULL;
326    while (pQueueInfo != NULL) {
327        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
328        while (pMemInfo != NULL) {
329            OT_MEM_INFO *pDelMemInfo = pMemInfo;
330            pMemInfo = pMemInfo->pNextMI;
331            delete pDelMemInfo;
332        }
333        pDelQueueInfo = pQueueInfo;
334        pQueueInfo    = pQueueInfo->pNextQI;
335        delete pDelQueueInfo;
336    }
337    g_pQueueInfo = pQueueInfo;
338}
339
340static void
341setGpuQueueInfoState(
342    uint32_t  count,
343    void     *pData)
344{
345    queueCount = count;
346    queueInfo  = (VkQueueFamilyProperties*)realloc((void*)queueInfo, count * sizeof(VkQueueFamilyProperties));
347    if (queueInfo != NULL) {
348        memcpy(queueInfo, pData, count * sizeof(VkQueueFamilyProperties));
349    }
350}
351
352// Check Queue type flags for selected queue operations
353static void
354validateQueueFlags(
355    VkQueue     queue,
356    const char *function)
357{
358    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
359    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
360        pQueueInfo = pQueueInfo->pNextQI;
361    }
362    if (pQueueInfo != NULL) {
363        if ((queueInfo != NULL) && (queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
364            log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
365                "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
366        } else {
367            log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
368                "Attempting %s on a possibly non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not known", function);
369        }
370    }
371}
372
373/* TODO: Port to new type safety */
374#if 0
375// Check object status for selected flag state
376static VkBool32
377validate_status(
378    VkObject            dispatchable_object,
379    VkObject            vkObj,
380    VkObjectType        objType,
381    ObjectStatusFlags   status_mask,
382    ObjectStatusFlags   status_flag,
383    VkFlags             msg_flags,
384    OBJECT_TRACK_ERROR  error_code,
385    const char         *fail_msg)
386{
387    if (objMap.find(vkObj) != objMap.end()) {
388        OBJTRACK_NODE* pNode = objMap[vkObj];
389        if ((pNode->status & status_mask) != status_flag) {
390            char str[1024];
391            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
392                "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
393                static_cast<uint64_t>(vkObj), fail_msg);
394            return VK_FALSE;
395        }
396        return VK_TRUE;
397    }
398    else {
399        // If we do not find it print an error
400        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
401            "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
402            static_cast<uint64_t>(vkObj), string_VkObjectType(objType));
403        return VK_FALSE;
404    }
405}
406#endif
407
408#include "vk_dispatch_table_helper.h"
409static void
410initObjectTracker(
411    layer_data *my_data,
412    const VkAllocationCallbacks *pAllocator)
413{
414    uint32_t report_flags = 0;
415    uint32_t debug_action = 0;
416    FILE *log_output = NULL;
417    const char *option_str;
418    // initialize ObjectTracker options
419    report_flags = getLayerOptionFlags("ObjectTrackerReportFlags", 0);
420    getLayerOptionEnum("ObjectTrackerDebugAction", (uint32_t *) &debug_action);
421
422    if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
423    {
424        option_str = getLayerOption("ObjectTrackerLogFilename");
425        log_output = getLayerLogOutput(option_str, "ObjectTracker");
426        VkDebugReportCallbackCreateInfoEXT dbgInfo;
427        memset(&dbgInfo, 0, sizeof(dbgInfo));
428        dbgInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
429        dbgInfo.pfnCallback = log_callback;
430        dbgInfo.pUserData = log_output;
431        dbgInfo.flags = report_flags;
432        layer_create_msg_callback(my_data->report_data, &dbgInfo, pAllocator, &my_data->logging_callback);
433    }
434
435    if (!objLockInitialized)
436    {
437        // TODO/TBD: Need to delete this mutex sometime.  How???  One
438        // suggestion is to call this during vkCreateInstance(), and then we
439        // can clean it up during vkDestroyInstance().  However, that requires
440        // that the layer have per-instance locks.  We need to come back and
441        // address this soon.
442        loader_platform_thread_create_mutex(&objLock);
443        objLockInitialized = 1;
444    }
445}
446
447//
448// Forward declares of generated routines
449//
450
// Per-object-type create/validate/destroy helpers (bodies generated elsewhere
// or defined below).
static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
static void destroy_command_buffer(VkCommandBuffer dispatchable_object, VkCommandBuffer object);
static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType, ObjectStatusFlags status_flag);
static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType, ObjectStatusFlags status_flag);
#if 0
static VkBool32 validate_status(VkDevice dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType,
    ObjectStatusFlags status_mask, ObjectStatusFlags status_flag, VkFlags msg_flags, OBJECT_TRACK_ERROR  error_code,
    const char         *fail_msg);
#endif
// Per-type tracking maps, keyed by object handle; defined in another
// translation unit.
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkPhysicalDeviceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkImageMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkQueueMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkDescriptorSetMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkBufferMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkFenceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkSemaphoreMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkCommandPoolMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkCommandBufferMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkSwapchainKHRMap;
extern unordered_map<uint64_t, OBJTRACK_NODE*> VkSurfaceKHRMap;
492
493static VkBool32 set_status(VkQueue dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType, ObjectStatusFlags status_flag)
494{
495    VkBool32 skipCall = VK_FALSE;
496    if (object != VK_NULL_HANDLE) {
497        if (VkFenceMap.find((uint64_t)(object)) != VkFenceMap.end()) {
498            OBJTRACK_NODE* pNode = VkFenceMap[(uint64_t)(object)];
499            pNode->status |= status_flag;
500        }
501        else {
502            // If we do not find it print an error
503            skipCall |= log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, (uint64_t) object, __LINE__, OBJTRACK_NONE, "OBJTRACK",
504                "Unable to set status for non-existent object 0x%" PRIxLEAST64 " of %s type",
505                (uint64_t)(object), string_VkDebugReportObjectTypeEXT(objType));
506        }
507    }
508    return skipCall;
509}
510
511static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType)
512{
513    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
514        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
515        reinterpret_cast<uint64_t>(vkObj));
516
517    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
518    pNewObjNode->objType = objType;
519    pNewObjNode->status  = OBJSTATUS_NONE;
520    pNewObjNode->vkObj  = reinterpret_cast<uint64_t>(vkObj);
521    VkPhysicalDeviceMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
522    uint32_t objIndex = objTypeToIndex(objType);
523    numObjs[objIndex]++;
524    numTotalObjs++;
525}
526
527static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType)
528{
529    // TODO: Add tracking of surface objects
530    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
531        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
532        (uint64_t)(vkObj));
533
534    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
535    pNewObjNode->objType = objType;
536    pNewObjNode->status  = OBJSTATUS_NONE;
537    pNewObjNode->vkObj   = (uint64_t)(vkObj);
538    VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
539    uint32_t objIndex = objTypeToIndex(objType);
540    numObjs[objIndex]++;
541    numTotalObjs++;
542}
543
544static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object)
545{
546    uint64_t object_handle = (uint64_t)(object);
547    if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
548        OBJTRACK_NODE* pNode = VkSurfaceKHRMap[(uint64_t)object];
549        uint32_t objIndex = objTypeToIndex(pNode->objType);
550        assert(numTotalObjs > 0);
551        numTotalObjs--;
552        assert(numObjs[objIndex] > 0);
553        numObjs[objIndex]--;
554        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
555           "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
556            string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
557            string_VkDebugReportObjectTypeEXT(pNode->objType));
558        delete pNode;
559        VkSurfaceKHRMap.erase(object_handle);
560    } else {
561        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT ) 0, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
562            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
563           object_handle);
564    }
565}
566
567static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj, VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level)
568{
569    log_msg(mdd(device), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
570        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
571        reinterpret_cast<uint64_t>(vkObj));
572
573    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
574    pNewObjNode->objType   = objType;
575    pNewObjNode->vkObj     = reinterpret_cast<uint64_t>(vkObj);
576    pNewObjNode->parentObj = (uint64_t) commandPool;
577    if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
578        pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
579    } else {
580        pNewObjNode->status = OBJSTATUS_NONE;
581    }
582    VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
583    uint32_t objIndex = objTypeToIndex(objType);
584    numObjs[objIndex]++;
585    numTotalObjs++;
586}
587
588static void free_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer)
589{
590    uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
591    if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
592        OBJTRACK_NODE* pNode = VkCommandBufferMap[(uint64_t)commandBuffer];
593
594       if (pNode->parentObj != (uint64_t)(commandPool)) {
595           log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_COMMAND_POOL_MISMATCH, "OBJTRACK",
596               "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64 " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
597               reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, (uint64_t)(commandPool));
598       } else {
599
600            uint32_t objIndex = objTypeToIndex(pNode->objType);
601            assert(numTotalObjs > 0);
602            numTotalObjs--;
603            assert(numObjs[objIndex] > 0);
604            numObjs[objIndex]--;
605            log_msg(mdd(device), VK_DEBUG_REPORT_INFO_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
606               "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
607                string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer), numTotalObjs, numObjs[objIndex],
608                string_VkDebugReportObjectTypeEXT(pNode->objType));
609            delete pNode;
610            VkCommandBufferMap.erase(object_handle);
611        }
612    } else {
613        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
614            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
615           object_handle);
616    }
617}
618
619static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj, VkDebugReportObjectTypeEXT objType)
620{
621    log_msg(mdd(device), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
622        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
623        (uint64_t)(vkObj));
624
625    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
626    pNewObjNode->objType   = objType;
627    pNewObjNode->status    = OBJSTATUS_NONE;
628    pNewObjNode->vkObj     = (uint64_t)(vkObj);
629    pNewObjNode->parentObj = (uint64_t) descriptorPool;
630    VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
631    uint32_t objIndex = objTypeToIndex(objType);
632    numObjs[objIndex]++;
633    numTotalObjs++;
634}
635
636static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet)
637{
638    uint64_t object_handle = (uint64_t)(descriptorSet);
639    if (VkDescriptorSetMap.find(object_handle) != VkDescriptorSetMap.end()) {
640        OBJTRACK_NODE* pNode = VkDescriptorSetMap[(uint64_t)descriptorSet];
641
642        if (pNode->parentObj != (uint64_t)(descriptorPool)) {
643            log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
644                "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64 " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
645                (uint64_t)(descriptorSet), pNode->parentObj, (uint64_t)(descriptorPool));
646        } else {
647            uint32_t objIndex = objTypeToIndex(pNode->objType);
648            assert(numTotalObjs > 0);
649            numTotalObjs--;
650            assert(numObjs[objIndex] > 0);
651            numObjs[objIndex]--;
652            log_msg(mdd(device), VK_DEBUG_REPORT_INFO_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
653               "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
654                string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(descriptorSet), numTotalObjs, numObjs[objIndex],
655                string_VkDebugReportObjectTypeEXT(pNode->objType));
656            delete pNode;
657            VkDescriptorSetMap.erase(object_handle);
658        }
659    } else {
660        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
661            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
662           object_handle);
663    }
664}
665
666static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType)
667{
668    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
669        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
670        reinterpret_cast<uint64_t>(vkObj));
671
672    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
673    pNewObjNode->objType = objType;
674    pNewObjNode->status  = OBJSTATUS_NONE;
675    pNewObjNode->vkObj  = reinterpret_cast<uint64_t>(vkObj);
676    VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
677    uint32_t objIndex = objTypeToIndex(objType);
678    numObjs[objIndex]++;
679    numTotalObjs++;
680}
681static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain)
682{
683    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFO_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t) vkObj, __LINE__, OBJTRACK_NONE, "OBJTRACK",
684        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, "SwapchainImage",
685        (uint64_t)(vkObj));
686
687    OBJTRACK_NODE* pNewObjNode             = new OBJTRACK_NODE;
688    pNewObjNode->objType                   = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
689    pNewObjNode->status                    = OBJSTATUS_NONE;
690    pNewObjNode->vkObj                     = (uint64_t) vkObj;
691    pNewObjNode->parentObj                 = (uint64_t) swapchain;
692    swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
693}
694
695//
696// Non-auto-generated API functions called by generated code
697//
698VkResult
699explicit_CreateInstance(
700    const VkInstanceCreateInfo  *pCreateInfo,
701    const VkAllocationCallbacks *pAllocator,
702    VkInstance                  *pInstance)
703{
704    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
705
706    assert(chain_info->u.pLayerInfo);
707    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
708    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
709    if (fpCreateInstance == NULL) {
710        return VK_ERROR_INITIALIZATION_FAILED;
711    }
712
713    // Advance the link info for the next element on the chain
714    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
715
716    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
717    if (result != VK_SUCCESS) {
718        return result;
719    }
720
721    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
722    initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
723    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);
724
725    my_data->report_data = debug_report_create_instance(
726                               pInstanceTable,
727                               *pInstance,
728                               pCreateInfo->enabledExtensionCount,
729                               pCreateInfo->ppEnabledExtensionNames);
730
731    initObjectTracker(my_data, pAllocator);
732    createInstanceRegisterExtensions(pCreateInfo, *pInstance);
733
734    create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);
735
736    return result;
737}
738
739void
740explicit_GetPhysicalDeviceQueueFamilyProperties(
741    VkPhysicalDevice                 gpu,
742    uint32_t*                        pCount,
743    VkQueueFamilyProperties*         pProperties)
744{
745    get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);
746
747    loader_platform_thread_lock_mutex(&objLock);
748    if (pProperties != NULL)
749        setGpuQueueInfoState(*pCount, pProperties);
750    loader_platform_thread_unlock_mutex(&objLock);
751}
752
753VkResult
754explicit_CreateDevice(
755    VkPhysicalDevice         gpu,
756    const VkDeviceCreateInfo *pCreateInfo,
757    const VkAllocationCallbacks   *pAllocator,
758    VkDevice                 *pDevice)
759{
760    loader_platform_thread_lock_mutex(&objLock);
761    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
762
763    assert(chain_info->u.pLayerInfo);
764    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
765    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
766    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
767    if (fpCreateDevice == NULL) {
768        loader_platform_thread_unlock_mutex(&objLock);
769        return VK_ERROR_INITIALIZATION_FAILED;
770    }
771
772    // Advance the link info for the next element on the chain
773    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
774
775    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
776    if (result != VK_SUCCESS) {
777        loader_platform_thread_unlock_mutex(&objLock);
778        return result;
779    }
780
781    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
782    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
783    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
784
785    initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);
786
787    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
788
789    create_device(*pDevice, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
790
791    loader_platform_thread_unlock_mutex(&objLock);
792    return result;
793}
794
795VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices)
796{
797    VkBool32 skipCall = VK_FALSE;
798    loader_platform_thread_lock_mutex(&objLock);
799    skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
800    loader_platform_thread_unlock_mutex(&objLock);
801    if (skipCall)
802        return VK_ERROR_VALIDATION_FAILED_EXT;
803    VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
804    loader_platform_thread_lock_mutex(&objLock);
805    if (result == VK_SUCCESS) {
806        if (pPhysicalDevices) {
807            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
808                create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
809            }
810        }
811    }
812    loader_platform_thread_unlock_mutex(&objLock);
813    return result;
814}
815
816void
817explicit_GetDeviceQueue(
818    VkDevice  device,
819    uint32_t  queueNodeIndex,
820    uint32_t  queueIndex,
821    VkQueue  *pQueue)
822{
823    loader_platform_thread_lock_mutex(&objLock);
824    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
825    loader_platform_thread_unlock_mutex(&objLock);
826
827    get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
828
829    loader_platform_thread_lock_mutex(&objLock);
830    addQueueInfo(queueNodeIndex, *pQueue);
831    create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
832    loader_platform_thread_unlock_mutex(&objLock);
833}
834
835VkResult
836explicit_MapMemory(
837    VkDevice         device,
838    VkDeviceMemory   mem,
839    VkDeviceSize     offset,
840    VkDeviceSize     size,
841    VkFlags          flags,
842    void           **ppData)
843{
844    VkBool32 skipCall = VK_FALSE;
845    loader_platform_thread_lock_mutex(&objLock);
846    skipCall |= set_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
847    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
848    loader_platform_thread_unlock_mutex(&objLock);
849    if (skipCall == VK_TRUE)
850        return VK_ERROR_VALIDATION_FAILED_EXT;
851
852    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
853
854    return result;
855}
856
857void
858explicit_UnmapMemory(
859    VkDevice       device,
860    VkDeviceMemory mem)
861{
862    VkBool32 skipCall = VK_FALSE;
863    loader_platform_thread_lock_mutex(&objLock);
864    skipCall |= reset_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
865    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
866    loader_platform_thread_unlock_mutex(&objLock);
867    if (skipCall == VK_TRUE)
868        return;
869
870    get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
871}
872
873VkResult
874explicit_QueueBindSparse(
875    VkQueue                       queue,
876    uint32_t                                    bindInfoCount,
877    const VkBindSparseInfo*                     pBindInfo,
878    VkFence                                     fence)
879{
880    loader_platform_thread_lock_mutex(&objLock);
881    validateQueueFlags(queue, "QueueBindSparse");
882
883    for (uint32_t i = 0; i < bindInfoCount; i++) {
884        for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
885            validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
886        for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
887            validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
888        for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
889            validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
890    }
891
892    loader_platform_thread_unlock_mutex(&objLock);
893
894    VkResult result = get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
895    return result;
896}
897
898VkResult
899explicit_AllocateCommandBuffers(
900    VkDevice                           device,
901    const VkCommandBufferAllocateInfo *pAllocateInfo,
902    VkCommandBuffer*                   pCommandBuffers)
903{
904    VkBool32 skipCall = VK_FALSE;
905    loader_platform_thread_lock_mutex(&objLock);
906    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
907    skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
908    loader_platform_thread_unlock_mutex(&objLock);
909
910    if (skipCall) {
911        return VK_ERROR_VALIDATION_FAILED_EXT;
912    }
913
914    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(
915        device, pAllocateInfo, pCommandBuffers);
916
917    loader_platform_thread_lock_mutex(&objLock);
918    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
919        alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, pAllocateInfo->level);
920    }
921    loader_platform_thread_unlock_mutex(&objLock);
922
923    return result;
924}
925
926VkResult
927explicit_AllocateDescriptorSets(
928    VkDevice                           device,
929    const VkDescriptorSetAllocateInfo *pAllocateInfo,
930    VkDescriptorSet                   *pDescriptorSets)
931{
932    VkBool32 skipCall = VK_FALSE;
933    loader_platform_thread_lock_mutex(&objLock);
934    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
935    skipCall |= validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
936    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
937        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i], VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
938    }
939    loader_platform_thread_unlock_mutex(&objLock);
940    if (skipCall)
941        return VK_ERROR_VALIDATION_FAILED_EXT;
942
943    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(
944        device, pAllocateInfo, pDescriptorSets);
945
946    if (VK_SUCCESS == result) {
947        loader_platform_thread_lock_mutex(&objLock);
948        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
949            alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i], VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
950        }
951        loader_platform_thread_unlock_mutex(&objLock);
952    }
953
954    return result;
955}
956
957void
958explicit_FreeCommandBuffers(
959    VkDevice               device,
960    VkCommandPool          commandPool,
961    uint32_t               commandBufferCount,
962    const VkCommandBuffer *pCommandBuffers)
963{
964    loader_platform_thread_lock_mutex(&objLock);
965    validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
966    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
967    loader_platform_thread_unlock_mutex(&objLock);
968
969    get_dispatch_table(object_tracker_device_table_map, device)->FreeCommandBuffers(device,
970        commandPool, commandBufferCount, pCommandBuffers);
971
972    loader_platform_thread_lock_mutex(&objLock);
973    for (uint32_t i = 0; i < commandBufferCount; i++)
974    {
975        free_command_buffer(device, commandPool, *pCommandBuffers);
976        pCommandBuffers++;
977    }
978    loader_platform_thread_unlock_mutex(&objLock);
979}
980
981void
982explicit_DestroySwapchainKHR(
983    VkDevice                    device,
984    VkSwapchainKHR              swapchain,
985    const VkAllocationCallbacks *pAllocator)
986{
987    loader_platform_thread_lock_mutex(&objLock);
988    // A swapchain's images are implicitly deleted when the swapchain is deleted.
989    // Remove this swapchain's images from our map of such images.
990    unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = swapchainImageMap.begin();
991    while (itr != swapchainImageMap.end()) {
992        OBJTRACK_NODE* pNode = (*itr).second;
993        if (pNode->parentObj == (uint64_t)(swapchain)) {
994           swapchainImageMap.erase(itr++);
995        } else {
996           ++itr;
997        }
998    }
999    destroy_swapchain_khr(device, swapchain);
1000    loader_platform_thread_unlock_mutex(&objLock);
1001
1002    get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
1003}
1004
1005void
1006explicit_FreeMemory(
1007    VkDevice       device,
1008    VkDeviceMemory mem,
1009    const VkAllocationCallbacks* pAllocator)
1010{
1011    loader_platform_thread_lock_mutex(&objLock);
1012    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
1013    loader_platform_thread_unlock_mutex(&objLock);
1014
1015    get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);
1016
1017    loader_platform_thread_lock_mutex(&objLock);
1018    destroy_device_memory(device, mem);
1019    loader_platform_thread_unlock_mutex(&objLock);
1020}
1021
1022VkResult
1023explicit_FreeDescriptorSets(
1024    VkDevice               device,
1025    VkDescriptorPool       descriptorPool,
1026    uint32_t               count,
1027    const VkDescriptorSet *pDescriptorSets)
1028{
1029    loader_platform_thread_lock_mutex(&objLock);
1030    validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
1031    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
1032    loader_platform_thread_unlock_mutex(&objLock);
1033    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
1034
1035    loader_platform_thread_lock_mutex(&objLock);
1036    for (uint32_t i=0; i<count; i++)
1037    {
1038        free_descriptor_set(device, descriptorPool, *pDescriptorSets++);
1039    }
1040    loader_platform_thread_unlock_mutex(&objLock);
1041    return result;
1042}
1043
// Destroy a descriptor pool after purging the tracker's records of every
// descriptor set that was allocated from it (the driver frees those sets
// implicitly with the pool), then forward the call down the chain.
void
explicit_DestroyDescriptorPool(
    VkDevice                     device,
    VkDescriptorPool             descriptorPool,
    const VkAllocationCallbacks *pAllocator)
{
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall) {
        // Validation failed: skip both the cleanup and the down-chain call.
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    // NOTE(review): the lock is released above and re-acquired here, leaving a
    // brief window where another thread could mutate the maps — confirm intended.
    loader_platform_thread_lock_mutex(&objLock);
    unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = VkDescriptorSetMap.begin();
    while (itr != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE* pNode = (*itr).second;
        // Advance before the potential erase inside destroy_descriptor_set so
        // the loop iterator is never invalidated.
        auto del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(descriptorPool)) {
            destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
        }
    }
    destroy_descriptor_pool(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
1073
// Destroy a command pool after purging the tracker's records of every command
// buffer that was allocated from it (the driver frees those buffers
// implicitly with the pool), then forward the call down the chain.
void
explicit_DestroyCommandPool(
    VkDevice                     device,
    VkCommandPool                commandPool,
    const VkAllocationCallbacks *pAllocator)
{
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall) {
        // Validation failed: skip both the cleanup and the down-chain call.
        return;
    }
    // NOTE(review): the lock is released above and re-acquired here, leaving a
    // brief window where another thread could mutate the maps — confirm intended.
    loader_platform_thread_lock_mutex(&objLock);
    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
    // Remove this pool's cmdBuffers from our cmd buffer map.
    unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = VkCommandBufferMap.begin();
    unordered_map<uint64_t, OBJTRACK_NODE*>::iterator del_itr;
    while (itr != VkCommandBufferMap.end()) {
        OBJTRACK_NODE* pNode = (*itr).second;
        // Advance before the potential erase inside destroy_command_buffer so
        // the loop iterator is never invalidated.
        del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(commandPool)) {
            // NOTE(review): the command-buffer handle is passed as both the
            // dispatchable object and the object to destroy — confirm this is
            // the signature destroy_command_buffer expects.
            destroy_command_buffer(reinterpret_cast<VkCommandBuffer>((*del_itr).first),
                                   reinterpret_cast<VkCommandBuffer>((*del_itr).first));
        }
    }
    destroy_command_pool(device, commandPool);
    loader_platform_thread_unlock_mutex(&objLock);
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
}
1105
1106VkResult
1107explicit_GetSwapchainImagesKHR(
1108    VkDevice        device,
1109    VkSwapchainKHR  swapchain,
1110    uint32_t       *pCount,
1111    VkImage        *pSwapchainImages)
1112{
1113    VkBool32 skipCall = VK_FALSE;
1114    loader_platform_thread_lock_mutex(&objLock);
1115    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
1116    loader_platform_thread_unlock_mutex(&objLock);
1117    if (skipCall)
1118        return VK_ERROR_VALIDATION_FAILED_EXT;
1119
1120    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
1121
1122    if (pSwapchainImages != NULL) {
1123        loader_platform_thread_lock_mutex(&objLock);
1124        for (uint32_t i = 0; i < *pCount; i++) {
1125            create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
1126        }
1127        loader_platform_thread_unlock_mutex(&objLock);
1128    }
1129    return result;
1130}
1131
1132// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
1133VkResult
1134explicit_CreateGraphicsPipelines(
1135    VkDevice                            device,
1136    VkPipelineCache                     pipelineCache,
1137    uint32_t                            createInfoCount,
1138    const VkGraphicsPipelineCreateInfo *pCreateInfos,
1139    const VkAllocationCallbacks        *pAllocator,
1140    VkPipeline                         *pPipelines)
1141{
1142    VkBool32 skipCall = VK_FALSE;
1143    loader_platform_thread_lock_mutex(&objLock);
1144    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
1145    if (pCreateInfos) {
1146        for (uint32_t idx0=0; idx0<createInfoCount; ++idx0) {
1147            if (pCreateInfos[idx0].basePipelineHandle) {
1148                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
1149            }
1150            if (pCreateInfos[idx0].layout) {
1151                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
1152            }
1153            if (pCreateInfos[idx0].pStages) {
1154                for (uint32_t idx1=0; idx1<pCreateInfos[idx0].stageCount; ++idx1) {
1155                    if (pCreateInfos[idx0].pStages[idx1].module) {
1156                        skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module, VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
1157                    }
1158                }
1159            }
1160            if (pCreateInfos[idx0].renderPass) {
1161                skipCall |= validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
1162            }
1163        }
1164    }
1165    if (pipelineCache) {
1166        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
1167    }
1168    loader_platform_thread_unlock_mutex(&objLock);
1169    if (skipCall)
1170        return VK_ERROR_VALIDATION_FAILED_EXT;
1171    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
1172    loader_platform_thread_lock_mutex(&objLock);
1173    if (result == VK_SUCCESS) {
1174        for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
1175            create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
1176        }
1177    }
1178    loader_platform_thread_unlock_mutex(&objLock);
1179    return result;
1180}
1181
1182// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
1183VkResult
1184explicit_CreateComputePipelines(
1185    VkDevice                           device,
1186    VkPipelineCache                    pipelineCache,
1187    uint32_t                           createInfoCount,
1188    const VkComputePipelineCreateInfo *pCreateInfos,
1189    const VkAllocationCallbacks       *pAllocator,
1190    VkPipeline                        *pPipelines)
1191{
1192    VkBool32 skipCall = VK_FALSE;
1193    loader_platform_thread_lock_mutex(&objLock);
1194    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
1195    if (pCreateInfos) {
1196        for (uint32_t idx0=0; idx0<createInfoCount; ++idx0) {
1197            if (pCreateInfos[idx0].basePipelineHandle) {
1198                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
1199            }
1200            if (pCreateInfos[idx0].layout) {
1201                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
1202            }
1203            if (pCreateInfos[idx0].stage.module) {
1204                skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module, VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
1205            }
1206        }
1207    }
1208    if (pipelineCache) {
1209        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
1210    }
1211    loader_platform_thread_unlock_mutex(&objLock);
1212    if (skipCall)
1213        return VK_ERROR_VALIDATION_FAILED_EXT;
1214    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
1215    loader_platform_thread_lock_mutex(&objLock);
1216    if (result == VK_SUCCESS) {
1217        for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
1218            create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
1219        }
1220    }
1221    loader_platform_thread_unlock_mutex(&objLock);
1222    return result;
1223}
1224