core_validation.cpp revision 7068a4271b2a542d7e6931104fa62e4a788ed8e5
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include <iostream>
#include <algorithm>
#include <list>
#include <SPIRV/spirv.hpp>
#include <set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
56#include "vk_struct_string_helper_cpp.h"
57#if defined(__GNUC__)
58#pragma GCC diagnostic ignored "-Wwrite-strings"
59#endif
60#if defined(__GNUC__)
61#pragma GCC diagnostic warning "-Wwrite-strings"
62#endif
63#include "vk_struct_size_helper.h"
64#include "core_validation.h"
65#include "vk_layer_config.h"
66#include "vk_layer_table.h"
67#include "vk_layer_data.h"
68#include "vk_layer_logging.h"
69#include "vk_layer_extension_utils.h"
70#include "vk_layer_utils.h"
71
72#if defined __ANDROID__
73#include <android/log.h>
74#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
75#else
76#define LOGCONSOLE(...) printf(__VA_ARGS__)
77#endif
78
using std::list;
using std::string;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;

#if MTMERGESOURCE
// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
#endif
// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    VkBool32 wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;
struct render_pass;

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGESOURCE
// MTMERGESOURCE - stuff pulled directly from MT
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    unordered_map<VkDescriptorSet, MT_DESCRIPTOR_SET_INFO> descriptorSetMap;
    // Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
// MTMERGESOURCE - End of MT stuff
#endif
    devExts device_extensions;
    vector<VkQueue> queues; // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
// MTMERGESOURCE - added a couple of fields to constructor initializer
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGESOURCE
        currentFenceId(1),
#endif
        device_extensions() {}
};

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}
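
// Illustrative usage sketch (not part of the original file; the values below are
// hypothetical). The template above is meant to be instantiated with either
// VkInstanceCreateInfo or VkDeviceCreateInfo:
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
//     VkInstanceCreateInfo ici = {};
//     ici.enabledLayerCount = 2;
//     ici.ppEnabledLayerNames = layers;
//     ValidateLayerOrdering(ici); // fine: core_validation precedes unique_objects
// Reversing the two entries would trigger the LOGCONSOLE warning above.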

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
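
// Layout note (a sketch of the SPIR-V encoding the accessors above rely on): every
// instruction's word 0 packs the word count into the high 16 bits and the opcode into
// the low 16 bits, so for any instruction:
//     uint32_t first_word = *it;
//     uint32_t word_count = first_word >> 16;     // what len() returns
//     uint32_t opcode     = first_word & 0xffffu; // what opcode() returns
// Advancing `it` by len() words therefore lands exactly on the next instruction, which
// is all operator++ needs to do.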

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
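
// Minimal usage sketch (illustration only; this helper is hypothetical and not part of
// the original file): shader_module's begin()/end() enable range-based iteration, and
// get_def() jumps from a result <id> to its defining instruction.
static inline unsigned example_count_variables(shader_module const &module) {
    unsigned count = 0;
    for (auto insn : module) { // walks the module instruction-by-instruction
        if (insn.opcode() == spv::OpVariable) {
            // word(1) of OpVariable is its result type <id>; get_def() finds the OpTypePointer
            auto type_def = module.get_def(insn.word(1));
            if (type_def != module.end())
                ++count;
        }
    }
    return count;
}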

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#define MAX_TID 513
static loader_platform_thread_id g_tidMapping[MAX_TID] = {0};
static uint32_t g_maxTID = 0;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkPhysicalDeviceMemoryProperties memProps;

static void clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF

static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    MT_OBJ_BINDING_INFO *retValue = NULL;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageBindingMap.find(handle);
        if (it != my_data->imageBindingMap.end())
            return &(*it).second;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferBindingMap.find(handle);
        if (it != my_data->bufferBindingMap.end())
            return &(*it).second;
        break;
    }
    default:
        break;
    }
    return retValue;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
static void delete_queue_info_list(layer_data *my_data) {
    // Process queue list, cleaning up each entry before deleting
    my_data->queueMap.clear();
}

// Delete CBInfo from container and clear mem references to CB
static void delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(my_data, cb);
    // Delete the CBInfo info
    my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
    my_data->commandBufferMap.erase(cb);
}

static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                    const VkDeviceMemory mem) {
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    default:
        break;
    }
}

static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                   const void *pCreateInfo) {
    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
        break;
    }
    // Swap chain images are a special case: they use my_data->imageBindingMap, but we copy in the
    // SwapchainCreateInfo's usage flags and set the mem value to a unique key. This is used by
    // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
        pCI->valid = false;
        pCI->create_info.image.usage = static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo)->imageUsage;
        break;
    }
    default:
        break;
    }
}

// Record a fence submission, assigning it the next id in our list of fences/fenceIds
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    *fenceId = my_data->currentFenceId++;

    // If a fence was provided, record its id and queue, and validate that it is unsignaled
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        // Validate that fence is in UNSIGNALED state
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}

// Remove a fenceInfo from our list of fences/fenceIds
static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}
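
// Sketch of the monotonic-id retirement model the helpers above and below implement
// (the numbers are illustrative, not from the original file):
//     submit A with fence F1 -> *fenceId = 7, queue.lastSubmittedId = 7
//     submit B with fence F2 -> *fenceId = 8, queue.lastSubmittedId = 8
//     F1 signals -> update_fence_tracking() raises queue.lastRetiredId to 7
// Anything whose fenceId is <= the queue's lastRetiredId is treated as complete (see
// checkCBCompleted() below); retire_queue_fences() simply snaps lastRetiredId forward
// to lastSubmittedId.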

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    VkBool32 correct_usage = VK_FALSE;
    VkBool32 skipCall = VK_FALSE;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}
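
// Worked example of the two modes above (hypothetical flag values):
//     actual  = VK_BUFFER_USAGE_TRANSFER_SRC_BIT
//     desired = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT
// Non-strict: (actual & desired) != 0       -> passes (the SRC bit overlaps)
// Strict:     (actual & desired) == desired -> fails  (the DST bit is missing), so an
// error is reported via the debug callback.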

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    DEVICE_MEM_INFO &memInfo = my_data->memObjMap[mem];
    memcpy(&memInfo.allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    memInfo.allocInfo.pNext = NULL;
    memInfo.object = object;
    memInfo.refCount = 0;
    memInfo.mem = mem;
    memInfo.image = VK_NULL_HANDLE;
    memInfo.memRange.offset = 0;
    memInfo.memRange.size = 0;
    memInfo.pData = 0;
    memInfo.pDriverData = 0;
    memInfo.valid = false;
}

static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                         VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo && !pBindInfo->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo) {
            pBindInfo->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}
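
// Sketch of the valid-flag protocol these two helpers implement (the command names are
// illustrative; any writer/reader pair works the same way):
//     vkAllocateMemory                  -> add_mem_obj_info()           memObj.valid = false
//     a write, e.g. a fill or copy dest -> set_memory_valid(..., true)
//     a read, e.g. a copy source        -> validate_memory_is_valid() errors while valid == false
// Swapchain images take the MEMTRACKER_SWAP_CHAIN_IMAGE_KEY path and track validity on
// their MT_OBJ_BINDING_INFO instead.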

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                                  const char *apiName) {
    VkBool32 skipCall = VK_FALSE;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            // If cmd buffer is not already in memory object's binding list, add it
            if (std::find(pMemInfo->pCommandBufferBindings.begin(), pMemInfo->pCommandBufferBindings.end(), cb) ==
                pMemInfo->pCommandBufferBindings.end()) {
                pMemInfo->pCommandBufferBindings.push_front(cb);
                pMemInfo->refCount++;
            }
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                // If memory object is not already in cmd buffer's reference list, add it
                if (std::find(pCBNode->pMemObjList.begin(), pCBNode->pMemObjList.end(), mem) == pCBNode->pMemObjList.end()) {
                    pCBNode->pMemObjList.push_front(mem);
                }
            }
        }
    }
    return skipCall;
}

// Free bindings related to CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);

    if (pCBNode) {
        if (pCBNode->pMemObjList.size() > 0) {
            list<VkDeviceMemory> mem_obj_list = pCBNode->pMemObjList;
            for (list<VkDeviceMemory>::iterator it = mem_obj_list.begin(); it != mem_obj_list.end(); ++it) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, *it);
                if (pInfo) {
                    pInfo->pCommandBufferBindings.remove(cb);
                    pInfo->refCount--;
                }
            }
            pCBNode->pMemObjList.clear();
        }
        pCBNode->activeDescriptorSets.clear();
        pCBNode->validate_functions.clear();
    }
}

// Delete the entire CB list
static void delete_cmd_buf_info_list(layer_data *my_data) {
    for (auto &cb_node : my_data->commandBufferMap) {
        clear_cmd_buf_and_mem_references(my_data, cb_node.first);
    }
    my_data->commandBufferMap.clear();
}

// For given MemObjInfo, report Obj & CB bindings
static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    VkBool32 skipCall = VK_FALSE;
    size_t cmdBufRefCount = pMemObjInfo->pCommandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->pObjBindings.size();

    if ((cmdBufRefCount + objRefCount) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin();
             it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
            // TODO : CommandBuffer should be source Obj here
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pCommandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    it->handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pObjBindings.clear();
    }
    return skipCall;
}

static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
    bool skipCall = false;
    *complete = true;

    if (pCBNode) {
        if (pCBNode->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBNode->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            bool commandBufferComplete = false;
            assert(pInfo->object != VK_NULL_HANDLE);
            list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
            list<VkCommandBuffer>::iterator temp;
            while (pInfo->pCommandBufferBindings.size() > 0 && it != pInfo->pCommandBufferBindings.end()) {
                skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
                if (commandBufferComplete) {
                    temp = it;
                    ++temp;
                    clear_cmd_buf_and_mem_references(dev_data, *it);
                    it = temp;
                } else {
                    ++it;
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (0 != pInfo->refCount) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs 3 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Decrement refCount for MemObjInfo
// 3. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
    if (pObjBindInfo) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
        // TODO : Make sure this is a reasonable way to reset mem binding
        pObjBindInfo->mem = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in the
            // memory object's list, decrement the memObj's refcount, and set the object's memory
            // binding pointer to NULL.
            VkBool32 clearSucceeded = VK_FALSE;
            for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
                if ((it->handle == handle) && (it->type == type)) {
                    pMemObjInfo->refCount--;
                    pMemObjInfo->pObjBindings.erase(it);
                    clearSucceeded = VK_TRUE;
                    break;
                }
            }
            if (VK_FALSE == clearSucceeded) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
//  device is required for error logging, need a dispatchable
//  object for that.
static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                // TODO : Need to track mem binding for obj and report conflict here
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pMemInfo->pObjBindings.push_front(oht);
                    pMemInfo->refCount++;
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                    pObjBindInfo->mem = mem;
                }
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns skipCall: VK_TRUE if a validation error was logged, VK_FALSE otherwise
static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
            // Without binding info there is nothing to update, so bail out here
            return skipCall;
        }
        // non-null case so should have real mem obj
        DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
        if (pInfo) {
            // Search for object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pInfo->pObjBindings.size() > 0) {
                for (auto it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                    if (((*it).handle == handle) && ((*it).type == type)) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                MT_OBJ_HANDLE_TYPE oht;
                oht.handle = handle;
                oht.type = type;
                pInfo->pObjBindings.push_front(oht);
                pInfo->refCount++;
            }
            // Need to set mem binding for this object
            pObjBindInfo->mem = mem;
        }
    }
    return skipCall;
}

template <typename T>
void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
                              const char *objectStr) {
    for (auto const &element : objectName) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
    }
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    VkBool32 skipCall = VK_FALSE;
    *mem = VK_NULL_HANDLE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
    if (pObjBindInfo) {
        if (pObjBindInfo->mem) {
            *mem = pObjBindInfo->mem;
        } else {
            skipCall =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
        }
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: %u", pInfo->refCount);
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->pObjBindings.size());
        if (pInfo->pObjBindings.size() > 0) {
            for (list<MT_OBJ_HANDLE_TYPE>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, it->handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->pCommandBufferBindings.size());
        if (pInfo->pCommandBufferBindings.size() > 0) {
            for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
                 it != pInfo->pCommandBufferBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", (*it));
            }
        }
    }
}

static void printCBList(layer_data *my_data, void *dispObj) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->pMemObjList.size() <= 0)
            continue;
        for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)(*it));
        }
    }
}

#endif

// Map actual TID to an index value and return that index
//  This keeps TIDs in range from 0-MAX_TID and simplifies compares between runs
static uint32_t getTIDIndex() {
    loader_platform_thread_id tid = loader_platform_get_thread_id();
    for (uint32_t i = 0; i < g_maxTID; i++) {
        if (tid == g_tidMapping[i])
            return i;
    }
    // Don't yet have mapping, set it and return newly set index
    assert(g_maxTID < MAX_TID);
    uint32_t retVal = g_maxTID;
    g_tidMapping[g_maxTID++] = tid;
    return retVal;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}
1138
// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
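
/* Illustrative sketch (an assumed module fragment, not part of the layer): after
 * build_def_index runs on SPIR-V containing
 *     %2 = OpTypeFloat 32        ; result id lives in word(1)
 *     %3 = OpConstant %2 1.0     ; result id lives in word(2)
 * def_index maps ids 2 and 3 to the word offsets of those instructions, so
 * src->get_def(3) can later jump straight to the OpConstant. */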

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
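
/* A hedged note on the stage-bit trick above: with the standard SPIR-V execution
 * model values (Vertex = 0, Fragment = 4), (1u << insn.word(1)) yields
 * VK_SHADER_STAGE_VERTEX_BIT (0x1) and VK_SHADER_STAGE_FRAGMENT_BIT (0x10)
 * respectively, so a plain bitwise AND against the requested stageBits suffices. */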

bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t const *words = (uint32_t const *)pCreateInfo->pCode;
    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);

    /* Just validate that the header makes sense. */
    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
}
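
/* Example header, for reference: the first words of a valid SPIR-V 1.0 binary are
 *     0x07230203   // spv::MagicNumber
 *     0x00010000   // spv::Version
 * followed by the generator, bound, and schema words -- hence the five-word
 * minimum checked above. */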

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}
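
/* Worked example (assuming a 32-bit scalar constant): for
 *     %5 = OpConstant %uint 16
 * the literal 16 sits in word(3), which is what get_constant_value returns; a
 * not-yet-specialized OpSpecConstant falls into the default of 1, per the TODO. */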


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i != insn.len() - 1) {
                ss << ", ";
            }
        }
        ss << ")"; /* emit the closing paren unconditionally, so a memberless struct still balances */
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
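
/* Example output (a sketch over an assumed module): with
 *     %f  = OpTypeFloat 32
 *     %v4 = OpTypeVector %f 4
 *     %p  = OpTypePointer Input %v4
 * describe_type(src, %p) produces "ptr to input vec4 of float32". */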


static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    switch (a_insn.opcode()) {
    /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
    case spv::OpTypeBool:
        return !b_arrayed;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
    case spv::OpTypeVector:
    case spv::OpTypeMatrix:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed -- that is handled above. */
        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (b_arrayed) {
                /* for the purposes of matching different levels of arrayness, structs are leaves. */
                return false;
            }

            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
                    return false;
                }
            }

            return true;
        }
    case spv::OpTypePointer:
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);

    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
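
/* Sketch of the arrayness unwrap above: when a vec4 vertex-shader output feeds a
 * geometry shader, the GS-side type is implicitly vec4[N]; calling
 * types_match(vs, gs, <vec4 id>, <vec4[N] id>, true) strips the consumer-side
 * OpTypeArray first and then compares vec4 against vec4, which succeeds. */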

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    default:
        /* everything else is just 1.
         * TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
        return 1;
    }
}
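
/* Worked examples: a mat4 (4 columns of vec4) consumes 4 * 1 = 4 locations, and a
 * float[3] consumes 3 * 1 = 3; with strip_array_level set -- the per-vertex
 * arrayness of tessellation/geometry inputs -- that same float[3] counts as 1. */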

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    /* TODO: collect the name, too? Isn't required to be present. */
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}
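
/* Sketch (an assumed tessellation-control input block):
 *     %blk = OpTypeStruct ...
 *     %arr = OpTypeArray %blk %n
 *     %ptr = OpTypePointer Input %arr
 * get_struct_type(src, get_def(%ptr), true) sees through the pointer, strips the
 * one expected level of per-vertex arrayness, and returns the OpTypeStruct. */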

static void collect_interface_block_members(layer_data *my_data, VkDevice dev, shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
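
/* Hedged example of what the two passes consume, for a block decorated as
 *     OpMemberDecorate %blk 0 Location 1
 *     OpMemberDecorate %blk 1 Location 2
 *     OpMemberDecorate %blk 1 Component 2
 * member 0 lands at key (1,0) and member 1 at key (2,2) in the output map. */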

static void collect_interface_by_location(layer_data *my_data, VkDevice dev, shader_module const *src, spirv_inst_iter entrypoint,
                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
                                          bool is_array_of_verts) {
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;

    for (auto insn : *src) {

        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
       terminator, to fill out the rest of the word - so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    auto word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == sinterface) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, defaults to 0 */

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupies multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(my_data, dev, src, out, blocks, is_array_of_verts, id, type);
            }
        }
    }
}
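
/* Layout reminder (per the SPIR-V spec): OpEntryPoint's operands are
 *     [execution model] [entry point id] [literal name ...] [interface ids ...]
 * which is why the code above scans the packed name string starting at word 3
 * and then treats every remaining word as an interface variable id. */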

static void collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev, shader_module const *src,
                                                 std::unordered_set<uint32_t> const &accessible_ids,
                                                 std::map<descriptor_slot_t, interface_var> &out) {

    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
         * DecorationDescriptorSet and DecorationBinding.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            auto existing_it = out.find(std::make_pair(set, binding));
            if (existing_it != out.end()) {
                /* conflict within spv image */
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
                        existing_it->first.second);
            }

            interface_var v;
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            out[std::make_pair(set, binding)] = v;
        }
    }
}
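
/* Example (GLSL-side view of an assumed input): the declaration
 *     layout(set = 0, binding = 1) uniform sampler2D tex;
 * arrives here as an OpVariable in UniformConstant storage carrying
 * DecorationDescriptorSet 0 and DecorationBinding 1, producing the key (0,1). */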

static bool validate_interface_between_stages(layer_data *my_data, VkDevice dev, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, char const *producer_name,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              char const *consumer_name, bool consumer_arrayed_input) {
    std::map<location_t, interface_var> outputs;
    std::map<location_t, interface_var> inputs;

    bool pass = true;

    collect_interface_by_location(my_data, dev, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
    collect_interface_by_location(my_data, dev, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
                                  consumer_arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
                        a_first.second, consumer_name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
                        producer_name)) {
                pass = false;
            }
            b_it++;
        } else {
            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
                /* OK! */
            } else {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}

enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}
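
/* Sketches of the mapping: VK_FORMAT_R8G8B8A8_UNORM falls through to
 * FORMAT_TYPE_FLOAT (UNORM reads as float in the shader), while
 * VK_FORMAT_R32_UINT maps to FORMAT_TYPE_UINT and should pair with an OpTypeInt
 * whose signedness operand is 0. */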

/* characterizes a SPIR-V type appearing in an interface to a FF stage,
 * for comparison to a VkFormat's characterization above. */
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeInt:
        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
    case spv::OpTypeFloat:
        return FORMAT_TYPE_FLOAT;
    case spv::OpTypeVector:
    case spv::OpTypeMatrix:
    case spv::OpTypeArray:
        /* all three wrap an element type in word(2); recurse into it */
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypePointer:
        return get_fundamental_type(src, insn.word(3));
    default:
        return FORMAT_TYPE_UNDEFINED;
    }
}

static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}
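
/* Sketch, assuming u_ffs is the usual find-first-set: for
 * VK_SHADER_STAGE_FRAGMENT_BIT (0x10) the lowest set bit is at position 5, so
 * get_shader_stage_id returns 4 -- the fragment entry of shader_stage_attribs
 * defined further below. */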

static bool validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi) {
    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
     * each binding should be specified only once.
     */
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}

static bool validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    std::map<location_t, interface_var> inputs;
    bool pass = true;

    collect_interface_by_location(my_data, dev, vs, entrypoint, spv::StorageClassInput, inputs, false);

    /* Build index by location */
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "Vertex attribute at location %d not consumed by VS", a_first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
                        b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            /* type checking */
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first,
                            describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it_a++;
            it_b++;
        }
    }

    return pass;
}

static bool validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
    std::map<location_t, interface_var> outputs;
    bool pass = true;

    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */

    collect_interface_by_location(my_data, dev, fs, entrypoint, spv::StorageClassOutput, outputs, false);

    auto it = outputs.begin();
    uint32_t attachment = 0;

    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
     * are currently dense, but the parallel with matching between shader stages is nice.
     */

    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "FS writes to output location %d with no matching attachment", it->first.first)) {
                pass = false;
            }
            it++;
        } else if (it == outputs.end() || it->first.first > attachment) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
                pass = false;
            }
            attachment++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
            unsigned att_type = get_format_type(color_formats[attachment]);

            /* type checking */
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
                            string_VkFormat(color_formats[attachment]),
                            describe_type(fs, it->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it++;
            attachment++;
        }
    }

    return pass;
}

/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
 * for example.
 * Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
 *  - NOT the shader input/output interfaces.
 *
 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
 * converting parts of this to be generated from the machine-readable spec instead.
 */
static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
    std::unordered_set<uint32_t> worklist;
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. That's OK -- we'll stumble
             * across all kinds of things here that we may not care about. */
            continue;
        }

        /* try to add to the output set */
        if (!ids.insert(id).second) {
            continue; /* if we already saw this id, we don't want to walk it again. */
        }

        switch (insn.opcode()) {
        case spv::OpFunction:
            /* scan whole body of the function, enlisting anything interesting */
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); /* ptr */
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); /* ptr */
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); /* base ptr */
                    break;
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); /* image or sampled image */
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
                    break;
                case spv::OpFunctionCall:
                    for (auto i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* fn itself, and all args */
                    }
                    break;

                case spv::OpExtInst:
                    for (auto i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* operands to ext inst */
                    }
                    break;
                }
            }
            break;
        }
    }
}
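
/* Traversal sketch: starting from the entry point's function id, an
 * OpFunctionCall enqueues the callee and its arguments, an OpLoad enqueues the
 * pointer it reads, and an OpAccessChain enqueues its base -- so a resource id
 * only lands in `ids` if some statically reachable instruction touches it. */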

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false},
    {"tessellation control shader", true},
    {"tessellation evaluation shader", false},
    {"geometry shader", true},
    {"fragment shader", false},
};

static bool validate_push_constant_block_against_pipeline(layer_data *my_data, VkDevice dev,
                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    /* strip off ptrs etc */
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    /* validate directly off the offsets. this isn't quite correct for arrays
     * and matrices, but is a good first step. TODO: arrays, matrices, weird
     * sizes */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {

            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; /* bytes; TODO: calculate this based on the type */

                bool found_range = false;
                for (auto const &range : *pushConstantRanges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                        /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}
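
/* Example of the containment test above (an assumed range): a
 * VkPushConstantRange with stageFlags = VK_SHADER_STAGE_VERTEX_BIT, offset = 0,
 * size = 8 covers a member decorated Offset 4 (the 4-byte probe fits in [0, 8)),
 * but that member still fails the stageFlags check when the stage being
 * validated is the fragment shader. */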

static bool validate_push_constant_usage(layer_data *my_data, VkDevice dev,
                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
                                         std::unordered_set<uint32_t> const &accessible_ids, VkShaderStageFlagBits stage) {
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass = validate_push_constant_block_against_pipeline(my_data, dev, pushConstantRanges, src,
                                                                 src->get_def(def_insn.word(1)), stage) &&
                   pass;
        }
    }

    return pass;
}

// For the given pipelineLayout, verify that the setLayout at slot.first
//  has the requested binding at slot.second
static VkDescriptorSetLayoutBinding const *get_descriptor_binding(layer_data *my_data, vector<VkDescriptorSetLayout> *pipelineLayout, descriptor_slot_t slot) {

    if (!pipelineLayout)
        return nullptr;

    if (slot.first >= pipelineLayout->size())
        return nullptr;

    auto const layout_node = my_data->descriptorSetLayoutMap[(*pipelineLayout)[slot.first]];
    if (!layout_node) // guard against an unknown setLayout handle rather than dereferencing a default-inserted null
        return nullptr;

    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
        return nullptr;

    assert(bindingIt->second < layout_node->createInfo.bindingCount);
    return &layout_node->createInfo.pBindings[bindingIt->second];
}

// Block of code at start here for managing/tracking Pipeline state that this layer cares about

static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};

// TODO : Should be tracking lastBound per commandBuffer and, when draws occur, report based on that cmd buffer's lastBound.
//   We would then need to synchronize accesses per cmd buffer so that reading state on one cmd buffer while a separate
//   thread updates that same cmd buffer does not change state from underneath us.
// Track the last cmd buffer touched by this thread
static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
        if (pCB->drawCount[i])
            return VK_TRUE;
    }
    return VK_FALSE;
}

// Check object status for selected flag state
static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags enable_mask, CBStatusFlags status_mask,
                                CBStatusFlags status_flag, VkFlags msg_flags, DRAW_STATE_ERROR error_code, const char *fail_msg) {
    // If a non-zero enable_mask is present, check it against status; if enable_mask
    //  is 0, no enable is required, so always just check status
    if ((!enable_mask) || (enable_mask & pNode->status)) {
        if ((pNode->status & status_mask) != status_flag) {
            // TODO : How to pass dispatchable objects as srcObject? Here src obj should be cmd buffer
            return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, error_code,
                           "DS", "CB object %#" PRIxLEAST64 ": %s", (uint64_t)(pNode->commandBuffer), fail_msg);
        }
    }
    return VK_FALSE;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
        return NULL;
    }
    return my_data->pipelineMap[pipeline];
}

// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
                return VK_TRUE;
        }
    }
    return VK_FALSE;
}

// Validate state stored as flags at time of draw call
static VkBool32 validate_draw_state_flags(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
    VkBool32 result;
    result =
        validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_VIEWPORT_SET, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                        DRAWSTATE_VIEWPORT_NOT_BOUND, "Dynamic viewport state not set for this command buffer");
    result |=
        validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_SCISSOR_SET, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                        DRAWSTATE_SCISSOR_NOT_BOUND, "Dynamic scissor state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_LINE_WIDTH_SET, CBSTATUS_LINE_WIDTH_SET,
                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_LINE_WIDTH_NOT_BOUND,
                              "Dynamic line width state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_DEPTH_BIAS_SET, CBSTATUS_DEPTH_BIAS_SET,
                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BIAS_NOT_BOUND,
                              "Dynamic depth bias state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_COLOR_BLEND_WRITE_ENABLE, CBSTATUS_BLEND_SET, CBSTATUS_BLEND_SET,
                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_BLEND_NOT_BOUND,
                              "Dynamic blend object state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_DEPTH_WRITE_ENABLE, CBSTATUS_DEPTH_BOUNDS_SET, CBSTATUS_DEPTH_BOUNDS_SET,
                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND,
                              "Dynamic depth bounds state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_READ_MASK_SET,
                              CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
                              "Dynamic stencil read mask state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_WRITE_MASK_SET,
                              CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
                              "Dynamic stencil write mask state not set for this command buffer");
    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_REFERENCE_SET,
                              CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
                              "Dynamic stencil reference state not set for this command buffer");
    if (indexedDraw)
        result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_INDEX_BUFFER_BOUND, CBSTATUS_INDEX_BUFFER_BOUND,
                                  VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
            return true;
    } else { // format and sample count must match
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // The references are not compatible
    return false;
}
2265
2266// For give primary and secondary RenderPass objects, verify that they're compatible
2267static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2268                                            string &errorMsg) {
2269    stringstream errorStr;
2270    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2271        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2272        errorMsg = errorStr.str();
2273        return false;
2274    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2275        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2276        errorMsg = errorStr.str();
2277        return false;
2278    }
    // Trivial pass case is the exact same RP
    if (primaryRP == secondaryRP) {
        return true;
    }
    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
        // pDepthStencilAttachment points at a single reference (or is NULL), so compare it once
        // per subpass with a count of 1 rather than once per color attachment with the color counts
        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
                                              primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
                                              secondaryRPCI->pAttachments)) {
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }
        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            // Compare input references against the input attachment counts (not the color counts)
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}
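
// Worked example of the compatibility rule above (illustrative, hypothetical values):
// two render passes whose subpasses reference attachments with the same format and
// sample count are compatible even if their load/store ops differ, e.g. both built
// from an attachment/reference pair like:
//     VkAttachmentDescription att = {};
//     att.format = VK_FORMAT_B8G8R8A8_UNORM;
//     att.samples = VK_SAMPLE_COUNT_1_BIT;
//     VkAttachmentReference colorRef = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};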

// For a given SET_NODE, verify that its set is compatible with the setLayout corresponding to pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
                                            const uint32_t layoutIndex, string &errorMsg) {
    stringstream errorStr;
    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
        errorStr << "invalid VkPipelineLayout (" << layout << ")";
        errorMsg = errorStr.str();
        return false;
    }
    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
                 << ", but you're attempting to bind a set to index " << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    // Get the specific setLayout from the PipelineLayout that overlaps this set
    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
        return true;
    }
    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
                 << " descriptors.";
        errorMsg = errorStr.str();
        return false; // trivial fail case
    }
    // Now check the set against the corresponding pipelineLayout to verify compatibility
    for (size_t i = 0; i < descriptorCount; ++i) {
        // Need to verify that layouts are identically defined
        //  TODO : Is the check below sufficient? It verifies that types & stageFlags match per descriptor --
        //    do we also need to check immutable samplers?
        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
                     << "' but corresponding descriptor from pipelineLayout is type '"
                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
            errorMsg = errorStr.str();
            return false;
        }
        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
                     << " but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
            errorMsg = errorStr.str();
            return false;
        }
    }
    return true;
}
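
// Usage sketch (hypothetical caller, illustrative names): a draw-time check might
// validate each bound set against the pipeline layout slot it occupies and report
// the returned message, much as validate_draw_state() below does:
//     string errorMsg;
//     if (!verify_set_layout_compatibility(my_data, pSet, pipelineLayout, setIndex, errorMsg)) {
//         /* forward errorMsg via log_msg(...) with DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE */
//     }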

// Validate that data for each specialization entry is fully contained within the buffer.
static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
    VkBool32 pass = VK_TRUE;

    VkSpecializationInfo const *spec = info->pSpecializationInfo;

    if (spec) {
        for (auto i = 0u; i < spec->mapEntryCount; i++) {
            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
                            "Specialization entry %u (for constant id %u) references memory outside provided "
                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
                            " bytes provided)",
                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {

                    pass = VK_FALSE;
                }
            }
        }
    }

    return pass;
}
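
// Worked example of the bound enforced above (hypothetical values): with
// dataSize = 8, a map entry {constantID = 0, offset = 6, size = 4} would read
// bytes 6..9, which oversteps the 8 bytes provided, so the entry is flagged:
//     VkSpecializationMapEntry entry = {0 /*constantID*/, 6 /*offset*/, 4 /*size*/};
//     VkSpecializationInfo spec = {1, &entry, 8 /*dataSize*/, pData};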

static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    /* Strip off any array or pointer levels. Where we remove array levels, multiply
     * the descriptor count by each dimension. */
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        } else {
            type = module->get_def(type.word(3));
        }
    }

    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        /* Invalid: a struct used as a descriptor must be decorated Block or BufferBlock */
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;

    case spv::OpTypeSampledImage:
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        /* Many descriptor types can back an image type; which one depends on the
         * dimension and on whether the image will be used with a sampler. SPIR-V
         * for Vulkan requires that Sampled be 1 or 2 -- leaving the decision to
         * runtime is not allowed.
         */
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    /* We shouldn't really see any other junk types -- but if we do, they're
     * a mismatch.
     */
    default:
        return false; /* Mismatch */
    }
}
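
// Mapping examples for the checks above (standard GLSL-to-SPIR-V cases, for
// reference): "uniform sampler2D s;" becomes OpTypeSampledImage and must be bound
// as COMBINED_IMAGE_SAMPLER; a std140 "uniform Block {...}" carries
// DecorationBlock and must be a UNIFORM_BUFFER(_DYNAMIC); a "buffer Block {...}"
// carries DecorationBufferBlock and must be a STORAGE_BUFFER(_DYNAMIC); and an
// array such as "uniform sampler2D s[4];" multiplies descriptor_count by 4 via
// the OpTypeArray handling.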

static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    /* dev */ 0, __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
                    "enabled on the device",
                    feature_name)) {
            return false;
        }
    }

    return true;
}

static VkBool32 validate_shader_capabilities(layer_data *my_data, VkDevice dev, shader_module const *src) {
    VkBool32 pass = VK_TRUE;

    auto enabledFeatures = &my_data->physDevProperties.features;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /* dev */ 0, __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, not supported in Vulkan.", insn.word(1))) {
                    pass = VK_FALSE;
                }
                break;
            }
        }
    }

    return pass;
}
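
// Example of the contract enforced above (illustrative): a geometry shader
// carries "OpCapability Geometry" in its SPIR-V, so the application must have
// enabled the matching feature when creating the device, e.g.:
//     VkPhysicalDeviceFeatures features = {};
//     features.geometryShader = VK_TRUE;
//     /* ... */
//     deviceCreateInfo.pEnabledFeatures = &features;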


// Validate the shaders used by the given pipeline, and capture the descriptor
//  slots they actually use into pPipeline->active_slots
static VkBool32 validate_and_capture_pipeline_shader_state(layer_data *my_data, const VkDevice dev, PIPELINE_NODE *pPipeline) {
    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
    /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
     * before trying to do anything more: */
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    RENDER_PASS_NODE const *rp = 0;
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    VkBool32 pass = VK_TRUE;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
        if (pStage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {

            if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
                                  VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_UNKNOWN_STAGE, "SC", "Unknown shader stage %d", pStage->stage)) {
                    pass = VK_FALSE;
                }
            } else {
                pass = validate_specialization_offsets(my_data, pStage) && pass;

                auto stage_id = get_shader_stage_id(pStage->stage);
                auto module = my_data->shaderModuleMap[pStage->module].get();
                shaders[stage_id] = module;

                /* find the entrypoint */
                entrypoints[stage_id] = find_entrypoint(module, pStage->pName, pStage->stage);
                if (entrypoints[stage_id] == module->end()) {
                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
                                "No entrypoint found named `%s` for stage %s", pStage->pName,
                                string_VkShaderStageFlagBits(pStage->stage))) {
                        pass = VK_FALSE;
                    }
                }

                /* validate shader capabilities against enabled device features */
                pass = validate_shader_capabilities(my_data, dev, module) && pass;

                /* mark accessible ids */
                std::unordered_set<uint32_t> accessible_ids;
                mark_accessible_ids(module, entrypoints[stage_id], accessible_ids);

                /* validate descriptor set layout against what the entrypoint actually uses */
                std::map<descriptor_slot_t, interface_var> descriptor_uses;
                collect_interface_by_descriptor_slot(my_data, dev, module, accessible_ids, descriptor_uses);

                auto layouts = pCreateInfo->layout != VK_NULL_HANDLE
                                   ? &(my_data->pipelineLayoutMap[pCreateInfo->layout].descriptorSetLayouts)
                                   : nullptr;

                for (auto use : descriptor_uses) {
                    // While validating shaders, capture which slots are used by the pipeline
                    pPipeline->active_slots[use.first.first].insert(use.first.second);

                    /* find the matching binding */
                    auto binding = get_descriptor_binding(my_data, layouts, use.first);
                    unsigned required_descriptor_count;

                    if (!binding) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                                    "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                                    use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                            pass = VK_FALSE;
                        }
                    } else if (~binding->stageFlags & pStage->stage) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                    "Shader uses descriptor slot %u.%u (used "
                                    "as type `%s`) but descriptor not "
                                    "accessible from stage %s",
                                    use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    string_VkShaderStageFlagBits(pStage->stage))) {
                            pass = VK_FALSE;
                        }
                    } else if (!descriptor_type_match(my_data, module, use.second.type_id, binding->descriptorType,
                                                      /*out*/ required_descriptor_count)) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                                    "Type mismatch on descriptor slot "
                                    "%u.%u (used as type `%s`) but "
                                    "bound descriptor has type %s",
                                    use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    string_VkDescriptorType(binding->descriptorType))) {
                            pass = VK_FALSE;
                        }
                    } else if (binding->descriptorCount < required_descriptor_count) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                                    "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                                    required_descriptor_count, use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    binding->descriptorCount)) {
                            pass = VK_FALSE;
                        }
                    }
                }

                /* validate push constant usage */
                pass =
                    validate_push_constant_usage(my_data, dev, &my_data->pipelineLayoutMap[pCreateInfo->layout].pushConstantRanges,
                                                 module, accessible_ids, pStage->stage) &&
                    pass;
            }
        }
    }

    if (pCreateInfo->renderPass != VK_NULL_HANDLE)
        rp = my_data->renderPassMap[pCreateInfo->renderPass];

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass = validate_vi_consistency(my_data, dev, vi) && pass;
    }

    if (shaders[vertex_stage]) {
        pass = validate_vi_against_vs_inputs(my_data, dev, vi, shaders[vertex_stage], entrypoints[vertex_stage]) && pass;
    }

    /* TODO: enforce rules about present combinations of shaders */
    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    /* Walk the stage array pairwise: each enabled stage's outputs must match the
     * inputs of the next enabled stage downstream. */
    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass = validate_interface_between_stages(my_data, dev, shaders[producer], entrypoints[producer],
                                                     shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
                                                     shader_stage_attribs[consumer].name,
                                                     shader_stage_attribs[consumer].arrayed_input) &&
                   pass;

            producer = consumer;
        }
    }

    if (shaders[fragment_stage] && rp) {
        pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage], entrypoints[fragment_stage], rp,
                                                       pCreateInfo->subpass) &&
               pass;
    }

    return pass;
}

// Return the Set node ptr for the specified set, or else NULL
static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
    auto set_it = my_data->setMap.find(set);
    if (set_it == my_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For the given layout node and binding, return the index where that binding begins
static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
    uint32_t offsetIndex = 0;
    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
        if (pLayout->createInfo.pBindings[i].binding == binding)
            break;
        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
    }
    return offsetIndex;
}

// For the given layout node and binding, return the last index that binding covers
static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
    uint32_t offsetIndex = 0;
    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
        if (pLayout->createInfo.pBindings[i].binding == binding)
            break;
    }
    return offsetIndex - 1;
}
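
// Worked example (hypothetical layout): with pBindings describing binding 0 with
// descriptorCount 2 and binding 1 with descriptorCount 3, the descriptors occupy
// flat indices 0..4, so getBindingStartIndex(pLayout, 1) == 2 and
// getBindingEndIndex(pLayout, 1) == 4.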

// For the given command buffer, verify that any dynamic descriptor in each set
//  of activeSetBindingsPairs has a valid dynamic offset bound.
//  To be valid, the dynamic offset combined with the offset and range from its
//  descriptor update must not overflow the size of the buffer being updated
static VkBool32 validate_dynamic_offsets(layer_data *my_data, const GLOBAL_CB_NODE *pCB,
                                         const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
    VkBool32 result = VK_FALSE;

    VkWriteDescriptorSet *pWDS = NULL;
    uint32_t dynOffsetIndex = 0;
    VkDeviceSize bufferSize = 0;
    for (auto set_bindings_pair : activeSetBindingsPairs) {
        SET_NODE *set_node = set_bindings_pair.first;
        LAYOUT_NODE *layout_node = set_node->pLayout;
        for (auto binding : set_bindings_pair.second) {
            uint32_t startIdx = getBindingStartIndex(layout_node, binding);
            uint32_t endIdx = getBindingEndIndex(layout_node, binding);
            for (uint32_t i = startIdx; i <= endIdx; ++i) {
                // TODO : Flag error here if set_node->pDescriptorUpdates[i] is NULL
                switch (set_node->pDescriptorUpdates[i]->sType) {
                case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
                    pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];
                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                            bufferSize = my_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
                            uint32_t dynOffset = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
                            if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
                                if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
                                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                      DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
                                                      "VkDescriptorSet (%#" PRIxLEAST64 ") descriptor #%u has a range of "
                                                      "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32
                                                      " combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
                                                      ") which has a size of %#" PRIxLEAST64 ".",
                                                      reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                                      pWDS->pBufferInfo[j].offset,
                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                                }
                            } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
                                result |= log_msg(
                                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW,
                                    "DS",
                                    "VkDescriptorSet (%#" PRIxLEAST64 ") descriptor #%u has dynamic offset %#" PRIxLEAST32 ". "
                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
                                    " from its update, this oversteps its buffer "
                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                            }
                            dynOffsetIndex++; // one dynamic offset is consumed per dynamic descriptor
                        }
                        // Advance i to the last descriptor of this write (++i at end of the enclosing
                        // for loop then moves one index past it)
                        i += pWDS->descriptorCount - 1;
                    }
                    break;
                default: // Currently only shadowing Write update nodes so we shouldn't get here
                    assert(0);
                    continue;
                }
            }
        }
    }
    return result;
}
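
// Worked example of the overflow test above (hypothetical values): for a buffer
// of size 256 whose descriptor was updated with offset 128 and range 64, a
// dynamic offset of 128 gives 128 + 64 + 128 = 320 > 256 and triggers
// DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, while a dynamic offset of 64 (256 total)
// still fits.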

// Validate overall state at the time of a draw call
static VkBool32 validate_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
    // First check flag states
    VkBool32 result = validate_draw_state_flags(my_data, pCB, indexedDraw);
    PIPELINE_NODE *pPipe = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
    // Now complete other state checks
    // TODO : Currently we only perform the next check if *something* was bound (non-zero last bound).
    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound.
    //  We should have that check separately and then gate this check based on it.
    if (pPipe) {
        auto const &state = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS];
        if (state.pipelineLayout) {
            string errorString;
            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case the same set is bound w/ different offsets
            vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
            for (auto setBindingPair : pPipe->active_slots) {
                uint32_t setIndex = setBindingPair.first;
                // If a valid set is not bound, flag an error
                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
                                      (uint64_t)pPipe->pipeline, setIndex);
                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
                    result |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                        "VkDescriptorSet (%#" PRIxLEAST64
                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
                } else { // Valid set is bound and layout compatible, validate that it's updated and verify any dynamic offsets
                    // Pull the set node
                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
                    // Save vector of all active sets to verify dynamicOffsets below
                    activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second));
                    // Make sure set has been updated
                    if (!pSet->pUpdateStructs) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
                                                              "this will result in undefined behavior.",
                                          (uint64_t)pSet->set);
                    }
                }
            }
            // For each dynamic descriptor, make sure its dynamic offset doesn't overstep its buffer
            if (!state.dynamicOffsets.empty())
                result |= validate_dynamic_offsets(my_data, pCB, activeSetBindingsPairs);
        }
        // Verify vertex buffer bindings
        if (pPipe->vertexBindingDescriptions.size() > 0) {
            for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
                if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                      "The Pipeline State Object (%#" PRIxLEAST64
                                      ") expects that this Command Buffer's vertex binding index " PRINTF_SIZE_T_SPECIFIER
                                      " should be set via vkCmdBindVertexBuffers.",
                                      (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline, i);
                }
            }
        } else {
            if (!pCB->currentDrawData.buffers.empty()) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                  0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                  "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
                                  ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
                                  (uint64_t)pCB->commandBuffer, (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
            }
        }
        // If viewport or scissors are dynamic, verify that the dynamic count matches the PSO count.
        // Skip the check if rasterization is disabled or there is no viewport state.
        if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
             !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
            pPipe->graphicsPipelineCI.pViewportState) {
            VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
            VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
            if (dynViewport) {
                if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                      "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
                                      ", but PSO viewportCount is %u. These counts must match.",
                                      pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (dynScissor) {
                if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                      "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
                                      ", but PSO scissorCount is %u. These counts must match.",
                                      pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return result;
}

// Verify that create state for a pipeline is valid
static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
                                          int pipelineIndex) {
    VkBool32 skipCall = VK_FALSE;

    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];

    // If the create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_NODE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }
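
    // Example of a well-formed derivative request (illustrative): within a single
    // vkCreateGraphicsPipelines call, pCreateInfos[0] sets
    // VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT while pCreateInfos[1] sets
    // VK_PIPELINE_CREATE_DERIVATIVE_BIT with basePipelineIndex = 0 and
    // basePipelineHandle = VK_NULL_HANDLE, satisfying the exactly-one-of rule
    // checked above.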

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->physDevProperties.features.independentBlend) {
            if (pPipeline->attachments.size() > 0) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
                        skipCall |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INDEPENDENT_BLEND, "DS",
                                    "Invalid Pipeline CreateInfo: If the independent blend feature is not "
                                    "enabled, all elements of pAttachments must be identical");
                    }
                }
            }
        }
        if (!my_data->physDevProperties.features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If the logic operations feature is not enabled, logicOpEnable must be VK_FALSE");
        }
        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
        }
    }

    // Ensure the subpass index is valid. If it is not, validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for the renderpass being invalid.
    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
    if (rp_data != my_data->renderPassMap.end() &&
        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                                           "is out of range for this renderpass (0..%u)",
                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data, device, pPipeline)) {
        skipCall = VK_TRUE;
    }
    // A vertex shader is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skipCall |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required");
    }
    // Either both or neither TC/TE shaders should be defined
    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
    }
    // Compute shaders should be specified independent of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: Do not specify a Compute Shader for a Gfx Pipeline");
    }
    // The VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // A mismatch between primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                           "topology for tessellation pipelines");
    }
    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology used with patchControlPoints value %u."
                                                                               " patchControlPoints should be >0 and <=32.",
                                pPipeline->tessStateCI.patchControlPoints);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, the counts need to be set correctly for the shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                           "and scissors are dynamic, the PSO must include "
                                                                           "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for the count
            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                        "vkCmdSetViewport().",
                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                        "vkCmdSetScissor().",
                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return skipCall;
}
3209
3210// Init the pipeline mapping info based on pipeline create info LL tree
3211//  Threading note : Calls to this function should wrapped in mutex
3212// TODO : this should really just be in the constructor for PIPELINE_NODE
3213static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
3214    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;
3215
3216    // First init create info
3217    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
3218
3219    size_t bufferSize = 0;
3220    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
3221    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;
3222
3223    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
3224        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
3225
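        // Stash each stage's create info in its dedicated member and record the stage bit in
        // active_shaders so later checks can tell which stages are present without re-walking pStages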
        switch (pPSSCI->stage) {
        case VK_SHADER_STAGE_VERTEX_BIT:
            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
            break;
        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
            break;
        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
            break;
        case VK_SHADER_STAGE_GEOMETRY_BIT:
            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
            break;
        case VK_SHADER_STAGE_FRAGMENT_BIT:
            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
            break;
        case VK_SHADER_STAGE_COMPUTE_BIT:
            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
            break;
        default:
            // TODO : Flag error
            break;
        }
    }
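    // The app is free to release its create-info memory once vkCreateGraphicsPipelines returns, so
    // every pointer embedded in the copied create info below is shadowed with a copy owned by this
    // layer for the lifetime of the PIPELINE_NODE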
    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
    if (pCreateInfo->stageCount != 0) {
        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
    }
    if (pCreateInfo->pVertexInputState != NULL) {
        pPipeline->vertexInputCI = *pCreateInfo->pVertexInputState;
        // Copy embedded ptrs
        pVICI = pCreateInfo->pVertexInputState;
        if (pVICI->vertexBindingDescriptionCount) {
            pPipeline->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
                pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
        }
        if (pVICI->vertexAttributeDescriptionCount) {
            pPipeline->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>(
                pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
        }
        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
    }
    if (pCreateInfo->pInputAssemblyState != NULL) {
        pPipeline->iaStateCI = *pCreateInfo->pInputAssemblyState;
        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
    }
    if (pCreateInfo->pTessellationState != NULL) {
        pPipeline->tessStateCI = *pCreateInfo->pTessellationState;
        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
    }
    if (pCreateInfo->pViewportState != NULL) {
        pPipeline->vpStateCI = *pCreateInfo->pViewportState;
        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
    }
    if (pCreateInfo->pRasterizationState != NULL) {
        pPipeline->rsStateCI = *pCreateInfo->pRasterizationState;
        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
    }
    if (pCreateInfo->pMultisampleState != NULL) {
        pPipeline->msStateCI = *pCreateInfo->pMultisampleState;
        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
    }
    if (pCreateInfo->pDepthStencilState != NULL) {
        pPipeline->dsStateCI = *pCreateInfo->pDepthStencilState;
        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
    }
    if (pCreateInfo->pColorBlendState != NULL) {
        pPipeline->cbStateCI = *pCreateInfo->pColorBlendState;
        // Copy embedded ptrs
        pCBCI = pCreateInfo->pColorBlendState;
        if (pCBCI->attachmentCount) {
            pPipeline->attachments = std::vector<VkPipelineColorBlendAttachmentState>(
                pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
        }
        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
    }
    if (pCreateInfo->pDynamicState != NULL) {
        pPipeline->dynStateCI = *pCreateInfo->pDynamicState;
        if (pPipeline->dynStateCI.dynamicStateCount) {
            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
        }
        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
    }
    return pPipeline;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
            delete[](*ii).second->graphicsPipelineCI.pStages;
        }
        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
            delete[](*ii).second->dynStateCI.pDynamicStates;
        }
        delete (*ii).second;
    }
    my_data->pipelineMap.clear();
}

// For given pipeline, return the number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if no multisample state was provided
static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
    if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
        return pPipe->msStateCI.rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

// Validate state related to the PSO
static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
                                      const VkPipeline pipeline) {
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
        // Verify that any MSAA request in PSO matches sample# in bound FB
        // Skip the check if rasterization is disabled.
        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
            if (pCB->activeRenderPass) {
                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
                uint32_t i;

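                // Sentinels: subpassNumSamples == 0 means no attachment has been seen yet, and
                // (VkSampleCountFlagBits)-1 means the subpass attachments disagree; neither value
                // can equal a real PSO sample count, so either one triggers the mismatch report below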
                for (i = 0; i < pSD->colorAttachmentCount; i++) {
                    VkSampleCountFlagBits samples;

                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
                        continue;

                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
                        subpassNumSamples = samples;
                    } else if (subpassNumSamples != samples) {
                        subpassNumSamples = (VkSampleCountFlagBits)-1;
                        break;
                    }
                }
                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
                        subpassNumSamples = samples;
                    else if (subpassNumSamples != samples)
                        subpassNumSamples = (VkSampleCountFlagBits)-1;
                }

                if (psoNumSamples != subpassNumSamples) {
                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                   (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                   "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
                                   ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
                                   (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
                }
            } else {
                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
                //   Verify and flag error as appropriate
            }
        }
        // TODO : Add more checks here
    } else {
        // TODO : Validate non-gfx pipeline updates
    }
    return VK_FALSE;
}

// The block of code below is specifically for managing/tracking descriptor sets (DSs)

// Return Pool node ptr for specified pool or else NULL
static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return my_data->descriptorPoolMap[pool];
}

static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
        return NULL;
    }
    return my_data->descriptorSetLayoutMap[layout];
}

// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return VK_FALSE;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct, or 0 for an unrecognized struct type
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

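// Descriptor set layouts are tracked as one flattened array of descriptors spanning all bindings:
// a (binding, arrayIndex) pair maps to flat index getBindingStartIndex(pLayout, binding) + arrayIndex.
// For example, if binding 0 holds 4 descriptors and binding 1 holds 2, an update targeting
// binding 1 at arrayIndex 1 lands at flat index 5.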
// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return getBindingStartIndex(pLayout, binding) + arrayIndex;
}

// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
}

// Verify that the descriptor type in the update struct matches what's expected by the layout
static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    VkBool32 skipCall = VK_FALSE;
    VkDescriptorType actualType;
    uint32_t i = 0;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return VK_FALSE;
    default:
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (VK_FALSE == skipCall) {
        // Set first stageFlags as reference and verify that all other updates match it
        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
        for (i = startIndex; i <= endIndex; i++) {
            if (pLayout->descriptorTypes[i] != actualType) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
            }
            if (pLayout->stageFlags[i] != refStageFlags) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
                    refStageFlags, pLayout->stageFlags[i]);
            }
        }
    }
    return skipCall;
}

// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
//   struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
// NOTE : Calls to this function should be wrapped in mutex
static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
    VkBool32 skipCall = VK_FALSE;
    VkWriteDescriptorSet *pWDS = NULL;
    VkCopyDescriptorSet *pCDS = NULL;
    switch (pUpdate->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        pWDS = new VkWriteDescriptorSet;
        *pNewNode = (GENERIC_HEADER *)pWDS;
        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));

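        // The shallow memcpy above also copied the app's pImageInfo/pBufferInfo/pTexelBufferView
        // pointers; deep-copy the array matching the descriptor type so the shadow node remains
        // valid after the app reclaims its update structures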
        switch (pWDS->descriptorType) {
        case VK_DESCRIPTOR_TYPE_SAMPLER:
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
            pWDS->pImageInfo = info;
        } break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
            pWDS->pTexelBufferView = info;
        } break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
            pWDS->pBufferInfo = info;
        } break;
        default:
            // Unexpected descriptor type is an error; signal the caller to skip the API call
            return VK_TRUE;
        }
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        pCDS = new VkCopyDescriptorSet;
        *pNewNode = (GENERIC_HEADER *)pCDS;
        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
        break;
    default:
        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
            return VK_TRUE;
    }
    // Make sure that pNext for the end of shadow copy is NULL
    (*pNewNode)->pNext = NULL;
    return skipCall;
}

// Verify that given sampler is valid
static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
    VkBool32 skipCall = VK_FALSE;
    auto sampIt = my_data->sampleMap.find(*pSampler);
    if (sampIt == my_data->sampleMap.end()) {
        if (!immutable) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
                                (uint64_t)*pSampler);
        } else { // immutable
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
                                "sampler %#" PRIxLEAST64,
                                (uint64_t)*pSampler);
        }
    } else {
        // TODO : Any further checks we want to do on the sampler?
    }
    return skipCall;
}

// Find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
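    // Look for an exact per-subresource entry first; if none exists, fall back to the coarse
    // whole-image entry (hasSubresource == false) before giving up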
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        imgpair = {image, false, VkImageSubresource()};
        imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
    }
    node = imgsubIt->second;
    return true;
}

// Find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
    }
    layout = imgsubIt->second.layout;
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto imgIt = my_data->imageMap.find(image);
    if (imgIt == my_data->imageMap.end())
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
    // potential errors in this case.
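    // If there are at least (arrayLayers * mipLevels) + 1 entries, every subresource has its own
    // entry in addition to the whole-image entry, so the coarse global entry carries no extra
    // information and can be skipped below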
    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
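        // First time this command buffer touches the subresource: record both the layout the
        // image is expected to already be in (initialLayout) and the new current layout, so
        // validation can later compare the expectation against the image's actual global layout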
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
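    // Fan out to one entry per aspect; the aspect-filtered overload above is a no-op for any
    // aspect not present in range.aspectMask, so a combined DEPTH|STENCIL range is split into
    // separate per-aspect layout entries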
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    // No subresource detail given, so record a single whole-image entry via the pair overload
    SetLayout(pObject, imgpair, layout);
}

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto image_view_data = dev_data->imageViewMap.find(imageView);
    assert(image_view_data != dev_data->imageViewMap.end());
    const VkImage &image = image_view_data->second.image;
    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            SetLayout(pCB, image, sub, layout);
        }
    }
}

// Verify that given imageView is valid
static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
    VkBool32 skipCall = VK_FALSE;
    auto ivIt = my_data->imageViewMap.find(*pImageView);
    if (ivIt == my_data->imageViewMap.end()) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
                            (uint64_t)*pImageView);
    } else {
        // Validate that imageLayout is compatible with aspectMask and image format
        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
        VkImage image = ivIt->second.image;
        // TODO : Check here in case we have a bad image
        VkFormat format = VK_FORMAT_MAX_ENUM;
        auto imgIt = my_data->imageMap.find(image);
        if (imgIt != my_data->imageMap.end()) {
            format = (*imgIt).second.createInfo.format;
        } else {
            // Also need to check the swapchains.
            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
                VkSwapchainKHR swapchain = swapchainIt->second;
                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
                    format = pswapchain_node->createInfo.imageFormat;
                }
            }
        }
        if (format == VK_FORMAT_MAX_ENUM) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
                                " in imageView %#" PRIxLEAST64,
                                (uint64_t)image, (uint64_t)*pImageView);
        } else {
            VkBool32 ds = vk_format_is_depth_or_stencil(format);
            switch (imageLayout) {
            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                // Color aspect bit must be set
                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
                                "and imageView %#" PRIxLEAST64 ""
                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
                                (uint64_t)*pImageView);
                }
                // format must NOT be DS
                if (ds) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
                                "and imageView %#" PRIxLEAST64 ""
                                " but the image format is %s which is not a color format.",
                                (uint64_t)*pImageView, string_VkFormat(format));
                }
                break;
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                // Depth or stencil bit must be set, but both must NOT be set
                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
                        // both must NOT be set
                        skipCall |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
                                    " that has both STENCIL and DEPTH aspects set",
                                    (uint64_t)*pImageView);
                    }
                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                    // Neither were set
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
                                " that does not have STENCIL or DEPTH aspect set.",
                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
                }
                // format must be DS
                if (!ds) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
                                " but the image format is %s which is not a depth/stencil format.",
                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
                }
                break;
            default:
                // anything to check for other layouts?
                break;
            }
        }
    }
    return skipCall;
}

// Verify that given bufferView is valid
static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
    VkBool32 skipCall = VK_FALSE;
    auto bvIt = my_data->bufferViewMap.find(*pBufferView);
    if (bvIt == my_data->bufferViewMap.end()) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
                            (uint64_t)*pBufferView);
    } else {
        // TODO : Any further checks we want to do on the bufferView?
    }
    return skipCall;
}

// Verify that given bufferInfo is valid
static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
    VkBool32 skipCall = VK_FALSE;
    auto bufferIt = my_data->bufferMap.find(pBufferInfo->buffer);
    if (bufferIt == my_data->bufferMap.end()) {
        skipCall |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
                    (uint64_t)pBufferInfo->buffer);
    } else {
        // TODO : Any further checks we want to do on the buffer?
    }
    return skipCall;
}

static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
                                       const VkDescriptorSetLayoutBinding *pLayoutBinding) {
    VkBool32 skipCall = VK_FALSE;
    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
    const VkSampler *pSampler = NULL;
    VkBool32 immutable = VK_FALSE;
    uint32_t i = 0;
    // For given update type, verify that update contents are correct
    switch (pWDS->descriptorType) {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
        }
        break;
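    // For combined image+samplers the sampler either comes from the update itself
    // (pImageInfo[i].sampler) or from the layout's pImmutableSamplers array; the 'immutable' flag
    // latches once an immutable sampler is used so mixing the two within one VkWriteDescriptorSet
    // is reported as an error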
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            if (NULL == pLayoutBinding->pImmutableSamplers) {
                pSampler = &(pWDS->pImageInfo[i].sampler);
                if (immutable) {
                    skipCall |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
                        ", but previous update(s) from this "
                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
                        "use immutable or non-immutable samplers.",
                        i, (uint64_t)*pSampler);
                }
            } else {
                if (i > 0 && !immutable) {
                    skipCall |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
                        "use immutable or non-immutable samplers.",
                        i);
                }
                immutable = VK_TRUE;
                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
            }
            skipCall |= validateSampler(my_data, pSampler, immutable);
        }
    // Intentionally fall through here to also validate image stuff
    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
        }
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
        }
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
        }
        break;
    default:
        break;
    }
    return skipCall;
}

// Verify that the given set exists and is not in use by an in-flight command buffer
// func_str is the name of the calling function
// Return VK_FALSE if no errors occur
// Return VK_TRUE if a validation error occurs and the callback returns VK_TRUE (to skip the upcoming API call down the chain)
VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
    VkBool32 skip_call = VK_FALSE;
    auto set_node = my_data->setMap.find(set);
    if (set_node == my_data->setMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        if (set_node->second->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
                                 func_str.c_str(), (uint64_t)(set));
        }
    }
    return skip_call;
}

static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
    // Flag any CBs this set is bound to as INVALID
    for (auto cb : pSet->boundCmdBuffers) {
        auto cb_node = dev_data->commandBufferMap.find(cb);
        if (cb_node != dev_data->commandBufferMap.end()) {
            cb_node->second->state = CB_INVALID;
        }
    }
}

// Update DS mappings based on write and copy update arrays
static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
                         uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
    VkBool32 skipCall = VK_FALSE;

    LAYOUT_NODE *pLayout = NULL;
    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
    // Validate Write updates
    uint32_t i = 0;
    for (i = 0; i < descriptorWriteCount; i++) {
        VkDescriptorSet ds = pWDS[i].dstSet;
        SET_NODE *pSet = my_data->setMap[ds];
        // Set being updated cannot be in-flight
        if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == VK_TRUE)
            return skipCall;
        // If set is bound to any cmdBuffers, mark them invalid
        invalidateBoundCmdBuffers(my_data, pSet);
        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
        pLayout = pSet->pLayout;
        // First verify valid update struct
        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
            break;
        }
        uint32_t binding = 0, endIndex = 0;
        binding = pWDS[i].dstBinding;
        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
        // Make sure that layout being updated has the binding being updated
        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                                "Descriptor Set %#" PRIxLEAST64 " does not have a binding to match "
                                "update binding %u for update type "
                                "%s!",
                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
        } else {
            // Next verify that update falls within size of given binding
            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
            if (getBindingEndIndex(pLayout, binding) < endIndex) {
                pLayoutCI = &pLayout->createInfo;
                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
            } else { // TODO : should we skip update on a type mismatch or force it?
                uint32_t startIndex;
                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
                // Layout bindings match w/ update, now verify that update type
                // & stageFlags are the same for entire update
                if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
                    // The update is within bounds and consistent, but need to
                    // make sure contents make sense as well
                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
                                                           &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
                        // Update is good. Save the update info
                        // Create new update struct for this set's shadow copy
                        GENERIC_HEADER *pNewNode = NULL;
                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
                        if (NULL == pNewNode) {
                            skipCall |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
                        } else {
                            // Insert shadow node into LL of updates for this set
                            pNewNode->pNext = pSet->pUpdateStructs;
                            pSet->pUpdateStructs = pNewNode;
                            // Now update appropriate descriptor(s) to point to new Update node
                            for (uint32_t j = startIndex; j <= endIndex; j++) {
                                assert(j < pSet->descriptorCount);
                                pSet->pDescriptorUpdates[j] = pNewNode;
                            }
                        }
                    }
                }
            }
        }
    }
    // Now validate copy updates
    for (i = 0; i < descriptorCopyCount; ++i) {
        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
        // For each copy make sure that update falls within given layout and that types match
        pSrcSet = my_data->setMap[pCDS[i].srcSet];
        pDstSet = my_data->setMap[pCDS[i].dstSet];
        // Set being updated cannot be in-flight
        if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == VK_TRUE)
            return skipCall;
        invalidateBoundCmdBuffers(my_data, pDstSet);
        pSrcLayout = pSrcSet->pLayout;
        pDstLayout = pDstSet->pLayout;
        // Validate that src binding is valid for src set layout
        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                                "Copy descriptor update %u has srcBinding %u "
                                "which is out of bounds for underlying SetLayout "
                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                                "Copy descriptor update %u has dstBinding %u "
                                "which is out of bounds for underlying SetLayout "
                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
        } else {
            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
                                            (const GENERIC_HEADER *)&(pCDS[i]));
            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
                                            (const GENERIC_HEADER *)&(pCDS[i]));
            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
                pLayoutCI = &pSrcLayout->createInfo;
                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
                            pCDS[i].srcBinding, DSstr.c_str());
            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
                pLayoutCI = &pDstLayout->createInfo;
                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
                            pCDS[i].dstBinding, DSstr.c_str());
            } else {
                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
                                                    (const GENERIC_HEADER *)&(pCDS[i]));
                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
                                                    (const GENERIC_HEADER *)&(pCDS[i]));
                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
                    // For copy just make sure that the types match and then perform the update
                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
                                            "that does not match overlapping dest descriptor type of %s!",
                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
                    } else {
                        // point dst descriptor at corresponding src descriptor
                        // TODO : This may be a hole. I believe copy should be its own copy,
                        //  otherwise a subsequent write update to src will incorrectly affect the copy
                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
                    }
                }
            }
        }
    }
    return skipCall;
}

// Verify that given pool has descriptors that are being requested for allocation.
// NOTE : Calls to this function should be wrapped in mutex
static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
                                                         const VkDescriptorSetLayout *pSetLayouts) {
    VkBool32 skipCall = VK_FALSE;
    uint32_t i = 0;
    uint32_t j = 0;

    // Track number of descriptorSets allowable in this pool
    if (pPoolNode->availableSets < count) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
                            ". This pool only has %d descriptorSets remaining.",
                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
    } else {
        pPoolNode->availableSets -= count;
    }

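    // For each requested set, walk its layout's bindings and reserve the per-type descriptor
    // counts from the pool's remaining budget, flagging any type that would be exhausted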
4132    for (i = 0; i < count; ++i) {
4133        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
4134        if (NULL == pLayout) {
4135            skipCall |=
4136                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
4137                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
4138                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
4139                        (uint64_t)pSetLayouts[i]);
4140        } else {
4141            uint32_t typeIndex = 0, poolSizeCount = 0;
4142            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
4143                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
4144                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
4145                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
4146                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4147                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
4148                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4149                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
4150                                        ". This pool only has %u descriptors of this type remaining.",
4151                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
4152                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
4153                } else { // Decrement available descriptors of this type
4154                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4155                }
4156            }
4157        }
4158    }
4159    return skipCall;
4160}
4161
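// Illustrative sketch (hypothetical application code): requesting more sets
// than the pool has available trips the DRAWSTATE_DESCRIPTOR_POOL_EMPTY case
// above. Assumes 'pool' was created with maxSets = 1 and 'layouts' holds two
// compatible set layouts.
//
//     VkDescriptorSetAllocateInfo alloc_info = {};
//     alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
//     alloc_info.descriptorPool = pool;
//     alloc_info.descriptorSetCount = 2; // exceeds the pool's availableSets (1)
//     alloc_info.pSetLayouts = layouts;
//     vkAllocateDescriptorSets(device, &alloc_info, sets);
//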
4162// Free the shadowed update node for this Set
4163// NOTE : Calls to this function should be wrapped in mutex
4164static void freeShadowUpdateTree(SET_NODE *pSet) {
4165    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4166    pSet->pUpdateStructs = NULL;
4167    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4168    // Clear the descriptor mappings as they will now be invalid
4169    pSet->pDescriptorUpdates.clear();
4170    while (pShadowUpdate) {
4171        pFreeUpdate = pShadowUpdate;
4172        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4173        VkWriteDescriptorSet *pWDS = NULL;
4174        switch (pFreeUpdate->sType) {
4175        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4176            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4177            switch (pWDS->descriptorType) {
4178            case VK_DESCRIPTOR_TYPE_SAMPLER:
4179            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4180            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4181            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4182                delete[] pWDS->pImageInfo;
4183            } break;
4184            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4185            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4186                delete[] pWDS->pTexelBufferView;
4187            } break;
4188            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4189            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4190            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4191            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4192                delete[] pWDS->pBufferInfo;
4193            } break;
4194            default:
4195                break;
4196            }
4197            break;
4198        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4199            break;
4200        default:
4201            assert(0);
4202            break;
4203        }
4204        delete pFreeUpdate;
4205    }
4206}
4207
4208// Free all DS Pools including their Sets & related sub-structs
4209// NOTE : Calls to this function should be wrapped in mutex
4210static void deletePools(layer_data *my_data) {
4211    if (my_data->descriptorPoolMap.empty())
4212        return;
4213    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4214        SET_NODE *pSet = (*ii).second->pSets;
4215        SET_NODE *pFreeSet = pSet;
4216        while (pSet) {
4217            pFreeSet = pSet;
4218            pSet = pSet->pNext;
4219            // Freeing layouts handled in deleteLayouts() function
4220            // Free Update shadow struct tree
4221            freeShadowUpdateTree(pFreeSet);
4222            delete pFreeSet;
4223        }
4224        delete (*ii).second;
4225    }
4226    my_data->descriptorPoolMap.clear();
4227}
4228
4229// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
4230// NOTE : Calls to this function should be wrapped in mutex
4231static void deleteLayouts(layer_data *my_data) {
4232    if (my_data->descriptorSetLayoutMap.empty())
4233        return;
4234    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
4235        LAYOUT_NODE *pLayout = (*ii).second;
4236        if (pLayout->createInfo.pBindings) {
4237            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
4238                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
4239            }
4240            delete[] pLayout->createInfo.pBindings;
4241        }
4242        delete pLayout;
4243    }
4244    my_data->descriptorSetLayoutMap.clear();
4245}
4246
4247// Currently clearing a set is removing all previous updates to that set
4248//  TODO : Validate if this is correct clearing behavior
4249static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4250    SET_NODE *pSet = getSetNode(my_data, set);
4251    if (!pSet) {
4252        // TODO : Return error
4253    } else {
4254        freeShadowUpdateTree(pSet);
4255    }
4256}
4257
4258static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4259                                VkDescriptorPoolResetFlags flags) {
4260    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4261    if (!pPool) {
4262        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4263                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4264                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4265    } else {
4266        // TODO: validate flags
4267        // For every set off of this pool, clear it
4268        SET_NODE *pSet = pPool->pSets;
4269        while (pSet) {
4270            clearDescriptorSet(my_data, pSet->set);
4271            pSet = pSet->pNext;
4272        }
4273        // Reset available count to max count for this pool
4274        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4275            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4276        }
4277    }
4278}
4279
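// Illustrative sketch (hypothetical application code): resetting a pool
// implicitly frees all of its sets and restores the per-type available
// counts, which is the bookkeeping modeled above.
//
//     vkResetDescriptorPool(device, pool, 0); // flags are reserved in Vulkan 1.0
//     // Any VkDescriptorSet previously allocated from 'pool' is now invalid.
//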
4280// For given CB object, fetch associated CB Node from map
4281static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4282    if (my_data->commandBufferMap.count(cb) == 0) {
4283        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4284                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4285                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
4286        return NULL;
4287    }
4288    return my_data->commandBufferMap[cb];
4289}
4290
4291// Free all CB Nodes
4292// NOTE : Calls to this function should be wrapped in mutex
4293static void deleteCommandBuffers(layer_data *my_data) {
4294    if (my_data->commandBufferMap.empty()) {
4295        return;
4296    }
4297    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4298        delete (*ii).second;
4299    }
4300    my_data->commandBufferMap.clear();
4301}
4302
4303static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4304    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4305                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4306                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4307}
4308
4309VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4310    if (!pCB->activeRenderPass)
4311        return VK_FALSE;
4312    VkBool32 skip_call = VK_FALSE;
4313    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4314        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4315                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4316                             "Commands cannot be called in a subpass using secondary command buffers.");
4317    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4318        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4319                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4320                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4321    }
4322    return skip_call;
4323}
4324
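// Illustrative sketch (hypothetical application code): in a subpass begun with
// VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, only vkCmdExecuteCommands()
// is legal, so the inline draw below is flagged.
//
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdDraw(cb, 3, 1, 0, 0); // error: inline command in a secondary-contents subpass
//     vkCmdEndRenderPass(cb);
//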
4325static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4326    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4327        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4328                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4329                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4330    return false;
4331}
4332
4333static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4334    if (!(flags & VK_QUEUE_COMPUTE_BIT))
4335        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4336                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4337                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4338    return false;
4339}
4340
4341static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4342    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4343        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4344                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4345                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
4346    return false;
4347}
4348
4349// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4350//  in the recording state or if there's an issue with the Cmd ordering
4351static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4352    VkBool32 skipCall = VK_FALSE;
4353    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4354    if (pool_data != my_data->commandPoolMap.end()) {
4355        VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4356        switch (cmd) {
4357        case CMD_BINDPIPELINE:
4358        case CMD_BINDPIPELINEDELTA:
4359        case CMD_BINDDESCRIPTORSETS:
4360        case CMD_FILLBUFFER:
4361        case CMD_CLEARCOLORIMAGE:
4362        case CMD_SETEVENT:
4363        case CMD_RESETEVENT:
4364        case CMD_WAITEVENTS:
4365        case CMD_BEGINQUERY:
4366        case CMD_ENDQUERY:
4367        case CMD_RESETQUERYPOOL:
4368        case CMD_COPYQUERYPOOLRESULTS:
4369        case CMD_WRITETIMESTAMP:
4370            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4371            break;
4372        case CMD_SETVIEWPORTSTATE:
4373        case CMD_SETSCISSORSTATE:
4374        case CMD_SETLINEWIDTHSTATE:
4375        case CMD_SETDEPTHBIASSTATE:
4376        case CMD_SETBLENDSTATE:
4377        case CMD_SETDEPTHBOUNDSSTATE:
4378        case CMD_SETSTENCILREADMASKSTATE:
4379        case CMD_SETSTENCILWRITEMASKSTATE:
4380        case CMD_SETSTENCILREFERENCESTATE:
4381        case CMD_BINDINDEXBUFFER:
4382        case CMD_BINDVERTEXBUFFER:
4383        case CMD_DRAW:
4384        case CMD_DRAWINDEXED:
4385        case CMD_DRAWINDIRECT:
4386        case CMD_DRAWINDEXEDINDIRECT:
4387        case CMD_BLITIMAGE:
4388        case CMD_CLEARATTACHMENTS:
4389        case CMD_CLEARDEPTHSTENCILIMAGE:
4390        case CMD_RESOLVEIMAGE:
4391        case CMD_BEGINRENDERPASS:
4392        case CMD_NEXTSUBPASS:
4393        case CMD_ENDRENDERPASS:
4394            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4395            break;
4396        case CMD_DISPATCH:
4397        case CMD_DISPATCHINDIRECT:
4398            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4399            break;
4400        case CMD_COPYBUFFER:
4401        case CMD_COPYIMAGE:
4402        case CMD_COPYBUFFERTOIMAGE:
4403        case CMD_COPYIMAGETOBUFFER:
4404        case CMD_CLONEIMAGEDATA:
4405        case CMD_UPDATEBUFFER:
4406        case CMD_PIPELINEBARRIER:
4407        case CMD_EXECUTECOMMANDS:
4408            break;
4409        default:
4410            break;
4411        }
4412    }
4413    if (pCB->state != CB_RECORDING) {
4414        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
4415    } else {
4416        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
4417        CMD_NODE cmdNode = {}; // init cmd node and append to end of cmd list
4418        cmdNode.cmdNumber = ++pCB->numCmds;
4419        cmdNode.type = cmd;
4420        pCB->cmds.push_back(cmdNode);
4421    }
4422    return skipCall;
4423}
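
// Illustrative sketch (hypothetical application code): recording a draw into a
// command buffer whose pool was created on a compute-only queue family fails
// the checkGraphicsBit() test that addCmd() applies to CMD_DRAW.
//
//     // 'compute_cb' allocated from a pool whose queue family lacks VK_QUEUE_GRAPHICS_BIT
//     vkBeginCommandBuffer(compute_cb, &begin_info);
//     vkCmdDraw(compute_cb, 3, 1, 0, 0);  // flagged: pool has no graphics capability
//     vkCmdDispatch(compute_cb, 8, 8, 1); // fine: pool has VK_QUEUE_COMPUTE_BIT
//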
4424// Reset the command buffer state
4425//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4426static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
4427    GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
4428    if (pCB) {
4429        pCB->cmds.clear();
4430        // Reset CB state (note that createInfo is not cleared)
4431        pCB->commandBuffer = cb;
4432        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4433        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4434        pCB->numCmds = 0;
4435        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4436        pCB->state = CB_NEW;
4437        pCB->submitCount = 0;
4438        pCB->status = 0;
4439        pCB->viewports.clear();
4440        pCB->scissors.clear();
4441        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4442            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
4443            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4444                auto set_node = my_data->setMap.find(set);
4445                if (set_node != my_data->setMap.end()) {
4446                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
4447                }
4448            }
4449            pCB->lastBound[i].reset();
4450        }
4451        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4452        pCB->activeRenderPass = 0;
4453        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4454        pCB->activeSubpass = 0;
4455        pCB->framebuffer = 0;
4456        pCB->fenceId = 0;
4457        pCB->lastSubmittedFence = VK_NULL_HANDLE;
4458        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
4459        pCB->destroyedSets.clear();
4460        pCB->updatedSets.clear();
4461        pCB->destroyedFramebuffers.clear();
4462        pCB->waitedEvents.clear();
4463        pCB->semaphores.clear();
4464        pCB->events.clear();
4465        pCB->waitedEventsBeforeQueryReset.clear();
4466        pCB->queryToStateMap.clear();
4467        pCB->activeQueries.clear();
4468        pCB->startedQueries.clear();
4469        pCB->imageLayoutMap.clear();
4470        pCB->eventToStageMap.clear();
4471        pCB->drawData.clear();
4472        pCB->currentDrawData.buffers.clear();
4473        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4474        pCB->secondaryCommandBuffers.clear();
4475        pCB->activeDescriptorSets.clear();
4476        pCB->validate_functions.clear();
4477        pCB->pMemObjList.clear();
4478        pCB->eventUpdates.clear();
4479    }
4480}
4481
4482// Set PSO-related status bits for CB, including dynamic state set via PSO
4483static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4484    for (auto const & att : pPipe->attachments) {
4485        if (0 != att.colorWriteMask) {
4486            pCB->status |= CBSTATUS_COLOR_BLEND_WRITE_ENABLE;
4487        }
4488    }
4489    if (pPipe->dsStateCI.depthWriteEnable) {
4490        pCB->status |= CBSTATUS_DEPTH_WRITE_ENABLE;
4491    }
4492    if (pPipe->dsStateCI.stencilTestEnable) {
4493        pCB->status |= CBSTATUS_STENCIL_TEST_ENABLE;
4494    }
4495    // Account for any dynamic state not set via this PSO
4496    if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
4497        pCB->status = CBSTATUS_ALL;
4498    } else {
4499        // First consider all state on
4500        // Then unset any state that's noted as dynamic in PSO
4501        // Finally OR that into CB statemask
4502        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4503        for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
4504            switch (pPipe->dynStateCI.pDynamicStates[i]) {
4505            case VK_DYNAMIC_STATE_VIEWPORT:
4506                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4507                break;
4508            case VK_DYNAMIC_STATE_SCISSOR:
4509                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4510                break;
4511            case VK_DYNAMIC_STATE_LINE_WIDTH:
4512                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4513                break;
4514            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4515                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4516                break;
4517            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4518                psoDynStateMask &= ~CBSTATUS_BLEND_SET;
4519                break;
4520            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4521                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4522                break;
4523            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4524                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4525                break;
4526            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4527                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4528                break;
4529            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4530                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4531                break;
4532            default:
4533                // TODO : Flag error here
4534                break;
4535            }
4536        }
4537        pCB->status |= psoDynStateMask;
4538    }
4539}
4540
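// Illustrative sketch (hypothetical application code): when a pipeline lists
// VK_DYNAMIC_STATE_VIEWPORT as dynamic, CBSTATUS_VIEWPORT_SET stays clear
// after the bind, so the viewport must be set explicitly before drawing.
//
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
//     vkCmdSetViewport(cb, 0, 1, &viewport); // satisfies the dynamic-viewport status bit
//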
4541// Print the last bound Gfx Pipeline
4542static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4543    VkBool32 skipCall = VK_FALSE;
4544    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4545    if (pCB) {
4546        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
4547        if (!pPipeTrav) {
4548            // nothing to print
4549        } else {
4550            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4551                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
4552                                vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
4553        }
4554    }
4555    return skipCall;
4556}
4557
4558static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4559    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4560    if (pCB && !pCB->cmds.empty()) {
4561        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4562                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
4563        vector<CMD_NODE> cmds = pCB->cmds;
4564        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4565            // TODO : Need to pass cb as srcObj here
4566            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4567                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4568        }
4569    } else {
4570        // Nothing to print
4571    }
4572}
4573
4574static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4575    VkBool32 skipCall = VK_FALSE;
4576    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4577        return skipCall;
4578    }
4579    skipCall |= printPipeline(my_data, cb);
4580    return skipCall;
4581}
4582
4583// Flags validation error if the associated call is made inside a render pass. The apiName
4584// routine should ONLY be called outside a render pass.
4585static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4586    VkBool32 inside = VK_FALSE;
4587    if (pCB->activeRenderPass) {
4588        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4589                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4590                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4591                         (uint64_t)pCB->activeRenderPass);
4592    }
4593    return inside;
4594}
4595
4596// Flags validation error if the associated call is made outside a render pass. The apiName
4597// routine should ONLY be called inside a render pass.
4598static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4599    VkBool32 outside = VK_FALSE;
4600    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4601        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4602         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4603        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4604                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4605                          "%s: This call must be issued inside an active render pass.", apiName);
4606    }
4607    return outside;
4608}
4609
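// Illustrative sketch (hypothetical application code) of both checks:
//
//     vkCmdCopyBuffer(cb, src, dst, 1, &region); // OK: issued outside a render pass
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdCopyBuffer(cb, src, dst, 1, &region); // flagged by insideRenderPass()
//     vkCmdEndRenderPass(cb);
//     vkCmdDraw(cb, 3, 1, 0, 0);                 // flagged by outsideRenderPass()
//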
4610static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
4611
4612    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");
4613
4614    if (!globalLockInitialized) {
4615        loader_platform_thread_create_mutex(&globalLock);
4616        globalLockInitialized = 1;
4617    }
4618#if MTMERGESOURCE
4619    // Zero out memory property data
4620    memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
4621#endif
4622}
4623
4624VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4625vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4626    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4627
4628    assert(chain_info->u.pLayerInfo);
4629    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4630    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4631    if (fpCreateInstance == NULL)
4632        return VK_ERROR_INITIALIZATION_FAILED;
4633
4634    // Advance the link info for the next element on the chain
4635    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4636
4637    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4638    if (result != VK_SUCCESS)
4639        return result;
4640
4641    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4642    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4643    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
4644
4645    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
4646                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4647
4648    init_core_validation(my_data, pAllocator);
4649
4650    ValidateLayerOrdering(*pCreateInfo);
4651
4652    return result;
4653}
4654
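// Illustrative sketch (hypothetical application code): this entry point is
// reached when the application (or the environment) enables the layer at
// instance creation.
//
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation"};
//     VkInstanceCreateInfo ici = {};
//     ici.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
//     ici.enabledLayerCount = 1;
//     ici.ppEnabledLayerNames = layers;
//     vkCreateInstance(&ici, NULL, &instance); // dispatches through the chain above
//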
4655/* hook DestroyInstance to remove tableInstanceMap entry */
4656VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4657    // TODOSC : Shouldn't need any customization here
4658    dispatch_key key = get_dispatch_key(instance);
4659    // TBD: Need any locking this early, in case this function is called at the
4660    // same time by more than one thread?
4661    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4662    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4663    pTable->DestroyInstance(instance, pAllocator);
4664
4665    loader_platform_thread_lock_mutex(&globalLock);
4666    // Clean up logging callback, if any
4667    while (my_data->logging_callback.size() > 0) {
4668        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4669        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4670        my_data->logging_callback.pop_back();
4671    }
4672
4673    layer_debug_report_destroy_instance(my_data->report_data);
4674    delete my_data->instance_dispatch_table;
4675    layer_data_map.erase(key);
4676    loader_platform_thread_unlock_mutex(&globalLock);
4677    if (layer_data_map.empty()) {
4678        // Release mutex when destroying last instance.
4679        loader_platform_thread_delete_mutex(&globalLock);
4680        globalLockInitialized = 0;
4681    }
4682}
4683
4684static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4685    uint32_t i;
4686    // TBD: Need any locking, in case this function is called at the same time
4687    // by more than one thread?
4688    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4689    dev_data->device_extensions.wsi_enabled = false;
4690
4691    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4692    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4693    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4694    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4695    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4696    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4697    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4698
4699    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4700        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4701            dev_data->device_extensions.wsi_enabled = true;
4702    }
4703}
4704
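// Illustrative sketch (hypothetical application code): enabling the swapchain
// extension at device creation is what sets wsi_enabled above (queue create
// info omitted for brevity).
//
//     const char *exts[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
//     VkDeviceCreateInfo dci = {};
//     dci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     dci.enabledExtensionCount = 1;
//     dci.ppEnabledExtensionNames = exts;
//     vkCreateDevice(gpu, &dci, NULL, &device);
//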
4705VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4706                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4707    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4708
4709    assert(chain_info->u.pLayerInfo);
4710    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4711    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4712    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
4713    if (fpCreateDevice == NULL) {
4714        return VK_ERROR_INITIALIZATION_FAILED;
4715    }
4716
4717    // Advance the link info for the next element on the chain
4718    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4719
4720    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4721    if (result != VK_SUCCESS) {
4722        return result;
4723    }
4724
4725    loader_platform_thread_lock_mutex(&globalLock);
4726    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4727    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4728
4729    // Setup device dispatch table
4730    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4731    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4732
4733    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4734    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4735    // Get physical device limits for this device
4736    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
4737    uint32_t count;
4738    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4739    my_device_data->physDevProperties.queue_family_properties.resize(count);
4740    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4741        gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
4742    // TODO: device limits should make sure these are compatible
4743    if (pCreateInfo->pEnabledFeatures) {
4744        my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
4745    } else {
4746        memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4747    }
4748    loader_platform_thread_unlock_mutex(&globalLock);
4749
4750    ValidateLayerOrdering(*pCreateInfo);
4751
4752    return result;
4753}
4754
4755// prototype
4756static void deleteRenderPasses(layer_data *);
4757VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4758    // TODOSC : Shouldn't need any customization here
4759    dispatch_key key = get_dispatch_key(device);
4760    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4761    // Free all the memory
4762    loader_platform_thread_lock_mutex(&globalLock);
4763    deletePipelines(dev_data);
4764    deleteRenderPasses(dev_data);
4765    deleteCommandBuffers(dev_data);
4766    deletePools(dev_data);
4767    deleteLayouts(dev_data);
4768    dev_data->imageViewMap.clear();
4769    dev_data->imageMap.clear();
4770    dev_data->imageSubresourceMap.clear();
4771    dev_data->imageLayoutMap.clear();
4772    dev_data->bufferViewMap.clear();
4773    dev_data->bufferMap.clear();
4774    loader_platform_thread_unlock_mutex(&globalLock);
4775#if MTMERGESOURCE
4776    VkBool32 skipCall = VK_FALSE;
4777    loader_platform_thread_lock_mutex(&globalLock);
4778    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4779            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4780    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4781            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4782    print_mem_list(dev_data, device);
4783    printCBList(dev_data, device);
4784    delete_cmd_buf_info_list(dev_data);
4785    // Report any memory leaks
4786    DEVICE_MEM_INFO *pInfo = NULL;
4787    if (dev_data->memObjMap.size() > 0) {
4788        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4789            pInfo = &(*ii).second;
4790            if (pInfo->allocInfo.allocationSize != 0) {
4791                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4792                skipCall |=
4793                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4794                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4795                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
4796                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
4797                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4798            }
4799        }
4800    }
4801    // Queues persist until device is destroyed
4802    delete_queue_info_list(dev_data);
4803    layer_debug_report_destroy_device(device);
4804    loader_platform_thread_unlock_mutex(&globalLock);
4805
4806#if DISPATCH_MAP_DEBUG
4807    fprintf(stderr, "Device: %p, key: %p\n", device, key);
4808#endif
4809    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4810    if (VK_FALSE == skipCall) {
4811        pDisp->DestroyDevice(device, pAllocator);
4812    }
4813#else
4814    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4815#endif
4816    delete dev_data->device_dispatch_table;
4817    layer_data_map.erase(key);
4818}
4819
4820#if MTMERGESOURCE
4821VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
4822vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
4823    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
4824    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
4825    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
4826    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
4827}
4828#endif
4829
4830static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4831
4832VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4833vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
4834    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
4835}
4836
4837VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4838vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
4839    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
4840}
4841
4842// TODO: Why does this exist - can we just use global?
4843static const VkLayerProperties cv_device_layers[] = {{
4844    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
4845}};
4846
4847VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
4848                                                                                    const char *pLayerName, uint32_t *pCount,
4849                                                                                    VkExtensionProperties *pProperties) {
4850    if (pLayerName == NULL) {
4851        dispatch_key key = get_dispatch_key(physicalDevice);
4852        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4853        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
4854    } else {
4855        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
4856    }
4857}
4858
4859VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4860vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
4861    /* draw_state physical device layers are the same as global */
4862    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
4863}
4864
4865// Validate that the initial image layout recorded in the command buffer matches
4866// the current global layout for that image, then update the global layout to
4867// the command buffer's final layout.
4868VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
4869    VkBool32 skip_call = VK_FALSE;
4870    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
4871    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
4872    for (auto cb_image_data : pCB->imageLayoutMap) {
4873        VkImageLayout imageLayout;
4874        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4875            skip_call |=
4876                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4877                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
4878                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4879        } else {
4880            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4881                // TODO: Set memory invalid which is in mem_tracker currently
4882            } else if (imageLayout != cb_image_data.second.initialLayout) {
4883                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4884                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT,
4885                                     "DS", "Cannot submit cmd buffer using image with layout %s when "
4886                                           "first use is %s.",
4887                                     string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
4888            }
4889            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4890        }
4891    }
4892    return skip_call;
4893}
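
// Illustrative sketch (hypothetical submission order): the mismatch case above
// fires when a command buffer was recorded against one layout but the image
// was transitioned by other work before this submit.
//
//     // submit_a's command buffer leaves 'image' in TRANSFER_DST_OPTIMAL;
//     // submit_b's command buffer was recorded expecting SHADER_READ_ONLY_OPTIMAL.
//     vkQueueSubmit(queue, 1, &submit_a, VK_NULL_HANDLE);
//     vkQueueSubmit(queue, 1, &submit_b, VK_NULL_HANDLE); // layout mismatch reported
//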
4894// Track which resources are in-flight by atomically incrementing their "in_use" count
4895VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4896    VkBool32 skip_call = VK_FALSE;
4897    for (auto drawDataElement : pCB->drawData) {
4898        for (auto buffer : drawDataElement.buffers) {
4899            auto buffer_data = my_data->bufferMap.find(buffer);
4900            if (buffer_data == my_data->bufferMap.end()) {
4901                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4902                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4903                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
4904            } else {
4905                buffer_data->second.in_use.fetch_add(1);
4906            }
4907        }
4908    }
4909    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4910        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4911            auto setNode = my_data->setMap.find(set);
4912            if (setNode == my_data->setMap.end()) {
4913                skip_call |=
4914                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4915                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4916                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
4917            } else {
4918                setNode->second->in_use.fetch_add(1);
4919            }
4920        }
4921    }
4922    for (auto semaphore : pCB->semaphores) {
4923        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4924        if (semaphoreNode == my_data->semaphoreMap.end()) {
4925            skip_call |=
4926                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4927                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4928                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
4929        } else {
4930            semaphoreNode->second.in_use.fetch_add(1);
4931        }
4932    }
4933    for (auto event : pCB->events) {
4934        auto eventNode = my_data->eventMap.find(event);
4935        if (eventNode == my_data->eventMap.end()) {
4936            skip_call |=
4937                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4938                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4939                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
4940        } else {
4941            eventNode->second.in_use.fetch_add(1);
4942        }
4943    }
4944    return skip_call;
4945}
4946
4947void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4948    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4949    for (auto drawDataElement : pCB->drawData) {
4950        for (auto buffer : drawDataElement.buffers) {
4951            auto buffer_data = my_data->bufferMap.find(buffer);
4952            if (buffer_data != my_data->bufferMap.end()) {
4953                buffer_data->second.in_use.fetch_sub(1);
4954            }
4955        }
4956    }
4957    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4958        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4959            auto setNode = my_data->setMap.find(set);
4960            if (setNode != my_data->setMap.end()) {
4961                setNode->second->in_use.fetch_sub(1);
4962            }
4963        }
4964    }
4965    for (auto semaphore : pCB->semaphores) {
4966        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4967        if (semaphoreNode != my_data->semaphoreMap.end()) {
4968            semaphoreNode->second.in_use.fetch_sub(1);
4969        }
4970    }
4971    for (auto event : pCB->events) {
4972        auto eventNode = my_data->eventMap.find(event);
4973        if (eventNode != my_data->eventMap.end()) {
4974            eventNode->second.in_use.fetch_sub(1);
4975        }
4976    }
4977    for (auto queryStatePair : pCB->queryToStateMap) {
4978        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4979    }
4980    for (auto eventStagePair : pCB->eventToStageMap) {
4981        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4982    }
4983}
4984
4985void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4986    for (uint32_t i = 0; i < fenceCount; ++i) {
4987        auto fence_data = my_data->fenceMap.find(pFences[i]);
4988        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4989            return;
4990        fence_data->second.needsSignaled = false;
4991        fence_data->second.in_use.fetch_sub(1);
4992        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
4993        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
4994            decrementResources(my_data, cmdBuffer);
4995        }
4996    }
4997}
4998
4999void decrementResources(layer_data *my_data, VkQueue queue) {
5000    auto queue_data = my_data->queueMap.find(queue);
5001    if (queue_data != my_data->queueMap.end()) {
5002        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
5003            decrementResources(my_data, cmdBuffer);
5004        }
5005        queue_data->second.untrackedCmdBuffers.clear();
5006        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
5007    }
5008}
5009
5010void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
5011    if (queue == other_queue) {
5012        return;
5013    }
5014    auto queue_data = dev_data->queueMap.find(queue);
5015    auto other_queue_data = dev_data->queueMap.find(other_queue);
5016    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
5017        return;
5018    }
5019    for (auto other_fence : other_queue_data->second.lastFences) {
5020        queue_data->second.lastFences.push_back(other_fence);
5021    }
5022    if (fence != VK_NULL_HANDLE) {
5023        auto fence_data = dev_data->fenceMap.find(fence);
5024        if (fence_data == dev_data->fenceMap.end()) {
5025            return;
5026        }
5027        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5028            fence_data->second.cmdBuffers.push_back(cmdbuffer);
5029        }
5030        other_queue_data->second.untrackedCmdBuffers.clear();
5031    } else {
5032        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5033            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
5034        }
5035        other_queue_data->second.untrackedCmdBuffers.clear();
5036    }
5037    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
5038        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
5039    }
5040}
5041
5042void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5043    auto queue_data = my_data->queueMap.find(queue);
5044    if (fence != VK_NULL_HANDLE) {
5045        vector<VkFence> prior_fences;
5046        auto fence_data = my_data->fenceMap.find(fence);
5047        if (fence_data == my_data->fenceMap.end()) {
5048            return;
5049        }
5050        if (queue_data != my_data->queueMap.end()) {
5051            prior_fences = queue_data->second.lastFences;
5052            queue_data->second.lastFences.clear();
5053            queue_data->second.lastFences.push_back(fence);
5054            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
5055                fence_data->second.cmdBuffers.push_back(cmdbuffer);
5056            }
5057            queue_data->second.untrackedCmdBuffers.clear();
5058        }
5059        fence_data->second.cmdBuffers.clear();
5060        fence_data->second.priorFences = prior_fences;
5061        fence_data->second.needsSignaled = true;
5062        fence_data->second.queue = queue;
5063        fence_data->second.in_use.fetch_add(1);
5064        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5065            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5066            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5067                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5068                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
5069                }
5070                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
5071            }
5072        }
5073    } else {
5074        if (queue_data != my_data->queueMap.end()) {
5075            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5076                const VkSubmitInfo *submit = &pSubmits[submit_idx];
5077                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5078                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5079                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
5080                    }
5081                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
5082                }
5083            }
5084        }
5085    }
5086    if (queue_data != my_data->queueMap.end()) {
5087        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5088            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5089            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5090                // Add cmdBuffers to both the global set and queue set
5091                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5092                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
5093                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
5094                }
5095                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5096                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5097            }
5098        }
5099    }
5100}
5101
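// Illustrative sketch (hypothetical call sequence) of the bookkeeping above:
//
//     vkQueueSubmit(queue, 1, &submit_a, VK_NULL_HANDLE); // CBs land in untrackedCmdBuffers
//     vkQueueSubmit(queue, 1, &submit_b, fence);          // untracked CBs migrate onto 'fence',
//                                                         // and prior fences are chained to it
//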
5102bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5103    bool skip_call = false;
5104    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
5105        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5106        skip_call |=
5107            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5108                    __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Command Buffer %#" PRIx64 " is already in use and is not marked "
5109                                                             "for simultaneous use.",
5110                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
5111    }
5112    return skip_call;
5113}
5114
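// Illustrative sketch (hypothetical application code): resubmitting a command
// buffer that is still in flight requires SIMULTANEOUS_USE at begin time.
//
//     vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);
//     vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE); // flagged unless the CB was begun with
//                                                       // VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT
//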
5115static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5116    bool skipCall = false;
5117    // Validate that cmd buffers have been updated
5118    if (CB_RECORDED != pCB->state) {
5119        if (CB_INVALID == pCB->state) {
5120            // Inform app of reason CB invalid
5121            bool causeReported = false;
5122            if (!pCB->destroyedSets.empty()) {
5123                std::stringstream set_string;
5124                for (auto set : pCB->destroyedSets)
5125                    set_string << " " << set;
5126
5127                skipCall |=
5128                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5129                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5130                            "You are submitting command buffer %#" PRIxLEAST64
5131                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
5132                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5133                causeReported = true;
5134            }
5135            if (!pCB->updatedSets.empty()) {
5136                std::stringstream set_string;
5137                for (auto set : pCB->updatedSets)
5138                    set_string << " " << set;
5139
5140                skipCall |=
5141                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5142                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5143                            "You are submitting command buffer %#" PRIxLEAST64
5144                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
5145                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5146                causeReported = true;
5147            }
5148            if (!pCB->destroyedFramebuffers.empty()) {
5149                std::stringstream fb_string;
5150                for (auto fb : pCB->destroyedFramebuffers)
5151                    fb_string << " " << fb;
5152
5153                skipCall |=
5154                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5155                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5156                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
5157                            "referenced framebuffers destroyed: %s",
5158                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
5159                causeReported = true;
5160            }
5161            // TODO : This is defensive programming to make sure an error is
5162            //  flagged if we hit this INVALID cmd buffer case and none of the
5163            //  above cases are hit. As the number of INVALID cases grows, this
5164            //  code should be updated to seamlessly handle all the cases.
5165            if (!causeReported) {
5166                skipCall |= log_msg(
5167                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5168                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5169                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
5170                    "should "
5171                    "be improved to report the exact cause.",
5172                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
5173            }
5174        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
5175            skipCall |=
5176                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5177                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
5178                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
5179                        (uint64_t)(pCB->commandBuffer));
5180        }
5181    }
5182    return skipCall;
5183}
5184
5185static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5186    // Track in-use for resources off of primary and any secondary CBs
5187    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
5188    if (!pCB->secondaryCommandBuffers.empty()) {
5189        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5190            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5191            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5192            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
5193                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5194                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5195                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5196                        " but that buffer has subsequently been bound to "
5197                        "primary cmd buffer %#" PRIxLEAST64 ".",
5198                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5199                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5200            }
5201        }
5202    }
5203    // TODO : Verify if this also needs to be checked for secondary command
5204    //  buffers. If so, this block of code can move to
5205    //   validateCommandBufferState() function. vulkan GL106 filed to clarify
5206    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5207        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5208                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5209                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
5210                            "set, but has been submitted %" PRIu64 " times.",
5211                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5212    }
5213    skipCall |= validateCommandBufferState(dev_data, pCB);
5214    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5215    // on device
5216    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5217    return skipCall;
5218}
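
// Illustrative sketch (application side, not layer code): the one-time-submit check above
// fires when a command buffer begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT is
// submitted more than once. Handle names (cb, queue) are placeholders:
//
//     VkCommandBufferBeginInfo beginInfo = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
//                                           VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr};
//     vkBeginCommandBuffer(cb, &beginInfo);
//     /* ... record work ... */
//     vkEndCommandBuffer(cb);
//     VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     si.commandBufferCount = 1;
//     si.pCommandBuffers = &cb;
//     vkQueueSubmit(queue, 1, &si, VK_NULL_HANDLE); // first submit is legal
//     vkQueueSubmit(queue, 1, &si, VK_NULL_HANDLE); // second submit trips the check above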
5219
5220VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5221vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5222    VkBool32 skipCall = VK_FALSE;
5223    GLOBAL_CB_NODE *pCBNode = NULL;
5224    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5225    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5226    loader_platform_thread_lock_mutex(&globalLock);
5227#if MTMERGESOURCE
5228    // TODO : Need to track fence and clear mem references when fence clears
5229    // MTMTODO : Merge this code with code below to avoid duplicating efforts
5230    uint64_t fenceId = 0;
5231    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);
5232
5233    print_mem_list(dev_data, queue);
5234    printCBList(dev_data, queue);
5235    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5236        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5237        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5238            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5239            if (pCBNode) {
5240                pCBNode->fenceId = fenceId;
5241                pCBNode->lastSubmittedFence = fence;
5242                pCBNode->lastSubmittedQueue = queue;
5243                for (auto &function : pCBNode->validate_functions) {
5244                    skipCall |= function();
5245                }
5246                for (auto &function : pCBNode->eventUpdates) {
5247                    skipCall |= static_cast<VkBool32>(function(queue));
5248                }
5249            }
5250        }
5251
5252        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5253            VkSemaphore sem = submit->pWaitSemaphores[i];
5254
5255            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5256                    skipCall |=
5257                    skipCall =
5258                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5259                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
5260                                "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
5261                }
5262                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
5263            }
5264        }
5265        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
5266            VkSemaphore sem = submit->pSignalSemaphores[i];
5267
5268            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5269                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
5270                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5271                                       VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
5272                                       "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
5273                }
5274                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
5275            }
5276        }
5277    }
5278#endif
5279    // First verify that fence is not in use
5280    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5281        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5282                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5283                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5284    }
5285    // Now verify each individual submit
5286    std::unordered_set<VkQueue> processed_other_queues;
5287    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5288        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5289        vector<VkSemaphore> semaphoreList;
5290        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5291            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5292            semaphoreList.push_back(semaphore);
5293            if (dev_data->semaphoreMap[semaphore].signaled) {
5294                dev_data->semaphoreMap[semaphore].signaled = 0;
5295            } else {
5296                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5297                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5298                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5299                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5300            }
5301            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5302            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5303                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5304                processed_other_queues.insert(other_queue);
5305            }
5306        }
5307        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5308            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5309            semaphoreList.push_back(semaphore);
5310            if (dev_data->semaphoreMap[semaphore].signaled) {
5311                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5312                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5313                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5314                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5315                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5316                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5317            } else {
5318                dev_data->semaphoreMap[semaphore].signaled = 1;
5319                dev_data->semaphoreMap[semaphore].queue = queue;
5320            }
5321        }
5322        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5323            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5324            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5325            pCBNode->semaphores = semaphoreList;
5326            pCBNode->submitCount++; // increment submit count
5327            skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5328        }
5329    }
5330    // Update cmdBuffer-related data structs and mark fence in-use
5331    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5332    loader_platform_thread_unlock_mutex(&globalLock);
5333    if (VK_FALSE == skipCall)
5334        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5335#if MTMERGESOURCE
5336    loader_platform_thread_lock_mutex(&globalLock);
5337    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5338        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5339        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5340            VkSemaphore sem = submit->pWaitSemaphores[i];
5341
5342            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5343                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
5344            }
5345        }
5346    }
5347    loader_platform_thread_unlock_mutex(&globalLock);
5348#endif
5349    return result;
5350}
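
// Illustrative sketch (application side, not layer code): the semaphore checks above expect
// every wait to consume exactly one prior, unconsumed signal. A conforming hand-off between
// two queues (queueA, queueB, and sem are placeholders) looks like:
//
//     VkSubmitInfo first = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     first.signalSemaphoreCount = 1;
//     first.pSignalSemaphores = &sem;    // marks sem signaled in semaphoreMap
//     vkQueueSubmit(queueA, 1, &first, VK_NULL_HANDLE);
//
//     VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
//     VkSubmitInfo second = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     second.waitSemaphoreCount = 1;
//     second.pWaitSemaphores = &sem;     // consumes the signal
//     second.pWaitDstStageMask = &waitStage;
//     vkQueueSubmit(queueB, 1, &second, VK_NULL_HANDLE);
//
// Waiting on sem without the first submit, or signaling it twice without an intervening
// wait, produces the DRAWSTATE_QUEUE_FORWARD_PROGRESS errors above.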
5351
5352#if MTMERGESOURCE
5353VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5354                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5355    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5356    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5357    // TODO : Track allocations and overall size here
5358    loader_platform_thread_lock_mutex(&globalLock);
5359    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5360    print_mem_list(my_data, device);
5361    loader_platform_thread_unlock_mutex(&globalLock);
5362    return result;
5363}
5364
5365VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5366vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5367    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5368
5369    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5370    // Before freeing a memory object, an application must ensure the memory object is no longer
5371    // in use by the device—for example by command buffers queued for execution. The memory need
5372    // not yet be unbound from all images and buffers, but any further use of those images or
5373    // buffers (on host or device) for anything other than destroying those objects will result in
5374    // undefined behavior.
5375
5376    loader_platform_thread_lock_mutex(&globalLock);
5377    freeMemObjInfo(my_data, device, mem, VK_FALSE);
5378    print_mem_list(my_data, device);
5379    printCBList(my_data, device);
5380    loader_platform_thread_unlock_mutex(&globalLock);
5381    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5382}
5383
5384VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5385    VkBool32 skipCall = VK_FALSE;
5386
5387    if (size == 0) {
5388        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5389        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5390                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5391                           "VkMapMemory: Attempting to map memory range of size zero");
5392    }
5393
5394    auto mem_element = my_data->memObjMap.find(mem);
5395    if (mem_element != my_data->memObjMap.end()) {
5396        // It is an application error to call VkMapMemory on an object that is already mapped
5397        if (mem_element->second.memRange.size != 0) {
5398            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5399                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5400                               "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5401        }
5402
5403        // Validate that offset + size is within object's allocationSize
5404        if (size == VK_WHOLE_SIZE) {
5405            if (offset >= mem_element->second.allocInfo.allocationSize) {
5406                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5407                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5408                                   "MEM", "Mapping memory with VK_WHOLE_SIZE at offset %" PRIu64 " but the total allocation size is only %" PRIu64,
5409                                   offset, mem_element->second.allocInfo.allocationSize);
5410            }
5411        } else {
5412            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
5413                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5414                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5415                                   "MEM", "Mapping memory from offset %" PRIu64 " to %" PRIu64 " but the total allocation size is only %" PRIu64,
5416                                   offset, size + offset, mem_element->second.allocInfo.allocationSize);
5417            }
5418        }
5419    }
5420    return skipCall;
5421}
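
// Worked example (hypothetical values, each call considered in isolation): for an allocation
// whose allocationSize is 256, validateMemRange accepts
//     vkMapMemory(device, mem, 0,   256,           0, &pData); // offset + size == 256
//     vkMapMemory(device, mem, 128, VK_WHOLE_SIZE, 0, &pData); // offset 128 < 256
// and flags
//     vkMapMemory(device, mem, 128, 256,           0, &pData); // 128 + 256 > 256
//     vkMapMemory(device, mem, 256, VK_WHOLE_SIZE, 0, &pData); // offset 256 >= 256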
5422
5423void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5424    auto mem_element = my_data->memObjMap.find(mem);
5425    if (mem_element != my_data->memObjMap.end()) {
5426        MemRange new_range;
5427        new_range.offset = offset;
5428        new_range.size = size;
5429        mem_element->second.memRange = new_range;
5430    }
5431}
5432
5433VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5434    VkBool32 skipCall = VK_FALSE;
5435    auto mem_element = my_data->memObjMap.find(mem);
5436    if (mem_element != my_data->memObjMap.end()) {
5437        if (!mem_element->second.memRange.size) {
5438            // Valid Usage: memory must currently be mapped
5439            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5440                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5441                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5442        }
5443        mem_element->second.memRange.size = 0;
5444        if (mem_element->second.pData) {
5445            free(mem_element->second.pData);
5446            mem_element->second.pData = 0;
5447        }
5448    }
5449    return skipCall;
5450}
5451
5452static char NoncoherentMemoryFillValue = 0xb;
5453
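// For mappings of non-coherent memory, the function below hands the application a pointer
// into the middle of a private allocation that is twice the mapped size and pre-filled with
// NoncoherentMemoryFillValue. The untouched half-size regions on either side act as guard
// bands that can later be checked for stray writes: e.g. a mapped size of 64 allocates 128
// bytes and returns pData + 32, leaving bytes [0, 32) and [96, 128) as guard bands.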
5454void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5455    auto mem_element = my_data->memObjMap.find(mem);
5456    if (mem_element != my_data->memObjMap.end()) {
5457        mem_element->second.pDriverData = *ppData;
5458        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5459        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5460            mem_element->second.pData = 0;
5461        } else {
5462            if (size == VK_WHOLE_SIZE) {
5463                size = mem_element->second.allocInfo.allocationSize;
5464            }
5465            size_t convSize = (size_t)(size);
5466            mem_element->second.pData = malloc(2 * convSize);
5467            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5468            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5469        }
5470    }
5471}
5472#endif
5473// Note: This function assumes that the global lock is held by the calling
5474// thread.
5475VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5476    VkBool32 skip_call = VK_FALSE;
5477    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5478    if (pCB) {
5479        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
5480            for (auto event : queryEventsPair.second) {
5481                if (my_data->eventMap[event].needsSignaled) {
5482                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5483                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5484                                         "Cannot get query results on queryPool %" PRIu64
5485                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
5486                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
5487                }
5488            }
5489        }
5490    }
5491    return skip_call;
5492}
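// Note on the check above: waitedEventsBeforeQueryReset records, per query, the events that
// were waited on before that query was reset in this command buffer. If any of those events
// still has needsSignaled set when the buffer retires, the query's results were gated on a
// signal that never arrived, so the layer reports DRAWSTATE_INVALID_QUERY.
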
5493// Remove given cmd_buffer from the global inFlight set.
5494//  Also, if given queue is valid, then remove the cmd_buffer from that queue's
5495//  inFlightCmdBuffers set. Finally, check all other queues and if given cmd_buffer
5496//  is still in flight on another queue, add it back into the global set.
5497// Note: This function assumes that the global lock is held by the calling
5498// thread.
5499static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
5500    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
5501    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5502    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5503        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
5504        for (auto q : dev_data->queues) {
5505            if ((q != queue) &&
5506                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
5507                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
5508                break;
5509            }
5510        }
5511    }
5512}
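
// Example scenario for the logic above: if cmd_buffer was submitted to both queueA and
// queueB, retiring the queueA submission calls removeInFlightCmdBuffer(dev_data, cb, queueA);
// the loop then finds cb still in queueB's inFlightCmdBuffers and re-inserts it into the
// global in-flight set, so it stays protected until the queueB submission also retires.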
5513#if MTMERGESOURCE
5514static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
5515    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5516    VkBool32 skipCall = false;
5517    auto pFenceInfo = my_data->fenceMap.find(fence);
5518    if (pFenceInfo != my_data->fenceMap.end()) {
5519        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
5520            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
5522                skipCall |=
5523                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5524                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5525                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5526            }
5527            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5528                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5529                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5530                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
5531                                    "acquire next image.",
5532                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5533            }
5534        } else {
5535            pFenceInfo->second.firstTimeFlag = VK_FALSE;
5536        }
5537    }
5538    return skipCall;
5539}
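
// Illustrative sketch (application side, not layer code): verifyFenceStatus flags status
// queries on fences that cannot make progress, assuming firstTimeFlag is set at fence
// creation (handled in vkCreateFence, not shown here):
//
//     VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO}; // flags == 0
//     VkFence fence;
//     vkCreateFence(device, &fci, nullptr, &fence);
//     vkGetFenceStatus(device, fence); // first check only clears firstTimeFlag
//     vkGetFenceStatus(device, fence); // warned above: fence was never submitted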
5540#endif
5541VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5542vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5543    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5544    VkBool32 skip_call = VK_FALSE;
5545#if MTMERGESOURCE
5546    // Verify fence status of submitted fences
5547    loader_platform_thread_lock_mutex(&globalLock);
5548    for (uint32_t i = 0; i < fenceCount; i++) {
5549        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
5550    }
5551    loader_platform_thread_unlock_mutex(&globalLock);
5552    if (skip_call)
5553        return VK_ERROR_VALIDATION_FAILED_EXT;
5554#endif
5555    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5556
5557    if (result == VK_SUCCESS) {
5558        loader_platform_thread_lock_mutex(&globalLock);
5559        // When we know that all fences are complete we can clean/remove their CBs
5560        if (waitAll || fenceCount == 1) {
5561            for (uint32_t i = 0; i < fenceCount; ++i) {
5562#if MTMERGESOURCE
5563                update_fence_tracking(dev_data, pFences[i]);
5564#endif
5565                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
5566                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
5567                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5568                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5569                }
5570            }
5571            decrementResources(dev_data, fenceCount, pFences);
5572        }
5573        // NOTE : An alternate case not handled here is when only some fences have completed. In
5574        //  that case, for the app to determine which fences completed, it will have to call
5575        //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
5576        loader_platform_thread_unlock_mutex(&globalLock);
5577    }
5578    if (VK_FALSE != skip_call)
5579        return VK_ERROR_VALIDATION_FAILED_EXT;
5580    return result;
5581}
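
// Note on the waitAll case above: with waitAll == VK_FALSE and more than one fence,
// VK_SUCCESS only guarantees that at least one fence signaled, e.g.
//     vkWaitForFences(device, 2, fences, VK_FALSE, UINT64_MAX);
// may return with only fences[0] signaled. The layer cannot tell which submissions retired,
// so per-fence cleanup is deferred until vkGetFenceStatus() reports each fence individually.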
5582
5583VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5584    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5585    bool skipCall = false;
5586    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5587#if MTMERGESOURCE
5588    loader_platform_thread_lock_mutex(&globalLock);
5589    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
5590    loader_platform_thread_unlock_mutex(&globalLock);
5591    if (skipCall)
5592        return result;
5593#endif
5594    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5595    VkBool32 skip_call = VK_FALSE;
5596    loader_platform_thread_lock_mutex(&globalLock);
5597    if (result == VK_SUCCESS) {
5598#if MTMERGESOURCE
5599        update_fence_tracking(dev_data, fence);
5600#endif
5601        auto fence_queue = dev_data->fenceMap[fence].queue;
5602        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
5603            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5604            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5605        }
5606        decrementResources(dev_data, 1, &fence);
5607    }
5608    loader_platform_thread_unlock_mutex(&globalLock);
5609    if (VK_FALSE != skip_call)
5610        return VK_ERROR_VALIDATION_FAILED_EXT;
5611    return result;
5612}
5613
5614VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5615vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
5616    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5617    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5618    loader_platform_thread_lock_mutex(&globalLock);
5619    dev_data->queues.push_back(*pQueue);
5620    QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5621    pQNode->device = device;
5622#if MTMERGESOURCE
5623    pQNode->lastRetiredId = 0;
5624    pQNode->lastSubmittedId = 0;
5625#endif
5626    loader_platform_thread_unlock_mutex(&globalLock);
5627}
5628
5629VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
5630    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5631    decrementResources(dev_data, queue);
5632    VkBool32 skip_call = VK_FALSE;
5633    loader_platform_thread_lock_mutex(&globalLock);
5634    // Iterate over a local copy of the set since removeInFlightCmdBuffer() erases members as we go
5635    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
5636    for (auto cmdBuffer : local_cb_set) {
5637        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5638        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
5639    }
5640    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5641    loader_platform_thread_unlock_mutex(&globalLock);
5642    if (VK_FALSE != skip_call)
5643        return VK_ERROR_VALIDATION_FAILED_EXT;
5644    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
5645#if MTMERGESOURCE
5646    if (VK_SUCCESS == result) {
5647        loader_platform_thread_lock_mutex(&globalLock);
5648        retire_queue_fences(dev_data, queue);
5649        loader_platform_thread_unlock_mutex(&globalLock);
5650    }
5651#endif
5652    return result;
5653}
5654
5655VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5656    VkBool32 skip_call = VK_FALSE;
5657    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5658    loader_platform_thread_lock_mutex(&globalLock);
5659    for (auto queue : dev_data->queues) {
5660        decrementResources(dev_data, queue);
5661        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5662            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
5663            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5664        }
5665    }
5666    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5667        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5668    }
5669    dev_data->globalInFlightCmdBuffers.clear();
5670    loader_platform_thread_unlock_mutex(&globalLock);
5671    if (VK_FALSE != skip_call)
5672        return VK_ERROR_VALIDATION_FAILED_EXT;
5673    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5674#if MTMERGESOURCE
5675    if (VK_SUCCESS == result) {
5676        loader_platform_thread_lock_mutex(&globalLock);
5677        retire_device_fences(dev_data, device);
5678        loader_platform_thread_unlock_mutex(&globalLock);
5679    }
5680#endif
5681    return result;
5682}
5683
5684VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5685    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5686    bool skipCall = false;
5687    loader_platform_thread_lock_mutex(&globalLock);
5688    if (dev_data->fenceMap[fence].in_use.load()) {
5689        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5690                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5691                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
5692    }
5693#if MTMERGESOURCE
5694    delete_fence_info(dev_data, fence);
5695    auto item = dev_data->fenceMap.find(fence);
5696    if (item != dev_data->fenceMap.end()) {
5697        dev_data->fenceMap.erase(item);
5698    }
5699#endif
5700    loader_platform_thread_unlock_mutex(&globalLock);
5701    if (!skipCall)
5702        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5703}
5704
5705VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5706vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5707    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5708    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5709    loader_platform_thread_lock_mutex(&globalLock);
5710    auto item = dev_data->semaphoreMap.find(semaphore);
5711    if (item != dev_data->semaphoreMap.end()) {
5712        if (item->second.in_use.load()) {
5713            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5714                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5715                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5716        }
5717        dev_data->semaphoreMap.erase(semaphore);
5718    }
5719    loader_platform_thread_unlock_mutex(&globalLock);
5720    // TODO : Clean up any internal data structures using this obj.
5721}
5722
5723VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5724    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5725    bool skip_call = false;
5726    loader_platform_thread_lock_mutex(&globalLock);
5727    auto event_data = dev_data->eventMap.find(event);
5728    if (event_data != dev_data->eventMap.end()) {
5729        if (event_data->second.in_use.load()) {
5730            skip_call |= log_msg(
5731                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5732                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5733                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5734        }
5735        dev_data->eventMap.erase(event_data);
5736    }
5737    loader_platform_thread_unlock_mutex(&globalLock);
5738    if (!skip_call)
5739        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5740    // TODO : Clean up any internal data structures using this obj.
5741}
5742
5743VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5744vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5745    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5746        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5747    // TODO : Clean up any internal data structures using this obj.
5748}
5749
5750VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5751                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5752                                                     VkQueryResultFlags flags) {
5753    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5754    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5755    GLOBAL_CB_NODE *pCB = nullptr;
5756    loader_platform_thread_lock_mutex(&globalLock);
5757    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5758        pCB = getCBNode(dev_data, cmdBuffer);
5759        for (auto queryStatePair : pCB->queryToStateMap) {
5760            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5761        }
5762    }
5763    VkBool32 skip_call = VK_FALSE;
5764    for (uint32_t i = 0; i < queryCount; ++i) {
5765        QueryObject query = {queryPool, firstQuery + i};
5766        auto queryElement = queriesInFlight.find(query);
5767        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5770        // Available and in flight
5771        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5772            queryToStateElement->second) {
5773            for (auto cmdBuffer : queryElement->second) {
5774                pCB = getCBNode(dev_data, cmdBuffer);
5775                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5776                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5777                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5778                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5779                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5780                                         (uint64_t)(queryPool), firstQuery + i);
5781                } else {
5782                    for (auto event : queryEventElement->second) {
5783                        dev_data->eventMap[event].needsSignaled = true;
5784                    }
5785                }
5786            }
5787            // Unavailable and in flight
5788        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5789                   !queryToStateElement->second) {
5790            // TODO : Can there be the same query in use by multiple command buffers in flight?
5791            bool make_available = false;
5792            for (auto cmdBuffer : queryElement->second) {
5793                pCB = getCBNode(dev_data, cmdBuffer);
5794                make_available |= pCB->queryToStateMap[query];
5795            }
5796            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5797                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5798                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5799                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5800                                     (uint64_t)(queryPool), firstQuery + i);
5801            }
5802            // Unavailable
5803        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5804            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5805                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5806                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5807                                 (uint64_t)(queryPool), firstQuery + i);
5808            // Uninitialized
5809        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5810            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5811                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5812                                 "Cannot get query results on queryPool %" PRIu64 " with index %d as data has not been collected for this index.",
5813                                 (uint64_t)(queryPool), firstQuery + i);
5814        }
5815    }
5816    loader_platform_thread_unlock_mutex(&globalLock);
5817    if (skip_call)
5818        return VK_ERROR_VALIDATION_FAILED_EXT;
5819    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5820                                                                flags);
5821}
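
// Illustrative sketch (application side, not layer code): the unavailable-while-in-flight
// check above is satisfied by asking the driver to wait for, or accept partial, results
// (QUERY_COUNT is a placeholder):
//
//     uint64_t results[QUERY_COUNT];
//     vkGetQueryPoolResults(device, queryPool, 0, QUERY_COUNT, sizeof(results), results,
//                           sizeof(uint64_t), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
//
// Requesting results with neither VK_QUERY_RESULT_WAIT_BIT nor VK_QUERY_RESULT_PARTIAL_BIT
// while the queries are still in flight is what produces the errors above.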
5822
5823VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5824    VkBool32 skip_call = VK_FALSE;
5825    auto buffer_data = my_data->bufferMap.find(buffer);
5826    if (buffer_data == my_data->bufferMap.end()) {
5827        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5828                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5829                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5830    } else {
5831        if (buffer_data->second.in_use.load()) {
5832            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5833                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5834                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5835        }
5836    }
5837    return skip_call;
5838}
5839
5840VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5841vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5842    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5843    VkBool32 skipCall = VK_FALSE;
5844    loader_platform_thread_lock_mutex(&globalLock);
5845#if MTMERGESOURCE
5846    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
5847    if (item != dev_data->bufferBindingMap.end()) {
5848        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5849        dev_data->bufferBindingMap.erase(item);
5850    }
5851#endif
5852    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
5853        loader_platform_thread_unlock_mutex(&globalLock);
5854        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5855        loader_platform_thread_lock_mutex(&globalLock);
5856    }
5857    dev_data->bufferMap.erase(buffer);
5858    loader_platform_thread_unlock_mutex(&globalLock);
5859}
5860
5861VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5862vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5863    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5864    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5865    loader_platform_thread_lock_mutex(&globalLock);
5866    auto item = dev_data->bufferViewMap.find(bufferView);
5867    if (item != dev_data->bufferViewMap.end()) {
5868        dev_data->bufferViewMap.erase(item);
5869    }
5870    loader_platform_thread_unlock_mutex(&globalLock);
5871}
5872
5873VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5874    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5875    VkBool32 skipCall = VK_FALSE;
5876#if MTMERGESOURCE
5877    loader_platform_thread_lock_mutex(&globalLock);
5878    auto item = dev_data->imageBindingMap.find((uint64_t)image);
5879    if (item != dev_data->imageBindingMap.end()) {
5880        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5881        dev_data->imageBindingMap.erase(item);
5882    }
5883    loader_platform_thread_unlock_mutex(&globalLock);
5884#endif
5885    if (VK_FALSE == skipCall)
5886        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5887
5888    loader_platform_thread_lock_mutex(&globalLock);
5889    const auto& entry = dev_data->imageMap.find(image);
5890    if (entry != dev_data->imageMap.end()) {
5891        // Clear any memory mapping for this image
5892        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
5893        if (mem_entry != dev_data->memObjMap.end())
5894            mem_entry->second.image = VK_NULL_HANDLE;
5895
5896        // Remove image from imageMap
5897        dev_data->imageMap.erase(entry);
5898    }
5899    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5900    if (subEntry != dev_data->imageSubresourceMap.end()) {
5901        for (const auto& pair : subEntry->second) {
5902            dev_data->imageLayoutMap.erase(pair);
5903        }
5904        dev_data->imageSubresourceMap.erase(subEntry);
5905    }
5906    loader_platform_thread_unlock_mutex(&globalLock);
5907}
5908#if MTMERGESOURCE
5909VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5910                                  VkDebugReportObjectTypeEXT object_type) {
5911    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5912        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
5913                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
5914                       other_handle);
5915    } else {
5916        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
5917                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
5918                       other_handle);
5919    }
5920}
5921
5922VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5923                               VkDebugReportObjectTypeEXT object_type) {
5924    VkBool32 skip_call = false;
5925
5926    // Round both endpoints of each range down to bufferImageGranularity before the overlap test
5927    const VkDeviceSize granularityMask = ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1);
5928    for (auto range : ranges) {
5929        if ((range.end & granularityMask) < (new_range.start & granularityMask))
5930            continue;
5931        if ((range.start & granularityMask) > (new_range.end & granularityMask))
5932            continue;
5933        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5934    }
5935    return skip_call;
5936}
5937
5938VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5939                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
5940                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
5941    MEMORY_RANGE range;
5942    range.handle = handle;
5943    range.memory = mem;
5944    range.start = memoryOffset;
5945    range.end = memoryOffset + memRequirements.size - 1;
5946    ranges.push_back(range);
5947    return validate_memory_range(dev_data, other_ranges, range, object_type);
5948}
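
// Worked example of the granularity masking in validate_memory_range above (hypothetical
// values): with bufferImageGranularity == 0x400, the mask ~(0x400 - 1) rounds both endpoints
// of each range down to a 1KB page. A buffer at [0x000, 0x2FF] and an image at [0x300, 0x6FF]
// land on overlapping pages, so neither early continue fires and the pair is reported as
// aliased; an image placed at [0x400, 0x7FF] instead rounds to a disjoint page and passes.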
5949
5950VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5951vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5952    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5953    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5954    loader_platform_thread_lock_mutex(&globalLock);
5955    // Track objects tied to memory
5956    uint64_t buffer_handle = (uint64_t)(buffer);
5957    VkBool32 skipCall =
5958        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5959    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
5960    {
5961        VkMemoryRequirements memRequirements;
5962        // MTMTODO : Shouldn't this call down the chain?
5963        vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
5964        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
5965                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
5966                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5967    }
5968    print_mem_list(dev_data, device);
5969    loader_platform_thread_unlock_mutex(&globalLock);
5970    if (VK_FALSE == skipCall) {
5971        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5972    }
5973    return result;
5974}
5975
5976VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5977vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5978    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5979    // TODO : What to track here?
5980    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5981    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5982}
5983
5984VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5985vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5986    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5987    // TODO : What to track here?
5988    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5989    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5990}
5991#endif
5992VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5993vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5994    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5995        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5996    // TODO : Clean up any internal data structures using this obj.
5997}
5998
5999VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6000vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
6001    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6002
6003    loader_platform_thread_lock_mutex(&globalLock);
6004
6005    my_data->shaderModuleMap.erase(shaderModule);
6006
6007    loader_platform_thread_unlock_mutex(&globalLock);
6008
6009    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
6010}
6011
6012VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6013vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
6014    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
6015    // TODO : Clean up any internal data structures using this obj.
6016}
6017
6018VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6019vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
6020    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6021        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
6022    // TODO : Clean up any internal data structures using this obj.
6023}
6024
6025VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6026vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6027    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
6028    // TODO : Clean up any internal data structures using this obj.
6029}
6030
6031VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6032vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6033    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6034        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6035    // TODO : Clean up any internal data structures using this obj.
6036}
6037
6038VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6039vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6040    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6041        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
6042    // TODO : Clean up any internal data structures using this obj.
6043}
6044
6045VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6046vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
6047    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6048
6049    bool skip_call = false;
6050    loader_platform_thread_lock_mutex(&globalLock);
6051    for (uint32_t i = 0; i < commandBufferCount; i++) {
6052#if MTMERGESOURCE
6053        clear_cmd_buf_and_mem_references(dev_data, pCommandBuffers[i]);
6054#endif
6055        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
6056            skip_call |=
6057                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6058                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6059                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
6060                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
6061        }
6062        // Delete CB information structure, and remove from commandBufferMap
6063        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
6064        if (cb != dev_data->commandBufferMap.end()) {
6065            // reset prior to delete for data clean-up
6066            resetCB(dev_data, (*cb).second->commandBuffer);
6067            delete (*cb).second;
6068            dev_data->commandBufferMap.erase(cb);
6069        }
6070
6071        // Remove commandBuffer reference from commandPoolMap
6072        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
6073    }
6074#if MTMERGESOURCE
6075    printCBList(dev_data, device);
6076#endif
6077    loader_platform_thread_unlock_mutex(&globalLock);
6078
6079    if (!skip_call)
6080        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6081}
6082
6083VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6084                                                                   const VkAllocationCallbacks *pAllocator,
6085                                                                   VkCommandPool *pCommandPool) {
6086    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6087
6088    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6089
6090    if (VK_SUCCESS == result) {
6091        loader_platform_thread_lock_mutex(&globalLock);
6092        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6093        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6094        loader_platform_thread_unlock_mutex(&globalLock);
6095    }
6096    return result;
6097}
6098
6099VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6100                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6101
6102    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6103    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6104    if (result == VK_SUCCESS) {
6105        loader_platform_thread_lock_mutex(&globalLock);
6106        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
6107        loader_platform_thread_unlock_mutex(&globalLock);
6108    }
6109    return result;
6110}
6111
6112VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
6113    VkBool32 skipCall = VK_FALSE;
6114    auto pool_data = dev_data->commandPoolMap.find(commandPool);
6115    if (pool_data != dev_data->commandPoolMap.end()) {
6116        for (auto cmdBuffer : pool_data->second.commandBuffers) {
6117            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
6118                skipCall |=
6119                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
6120                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
6121                            "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
6122                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
6123            }
6124        }
6125    }
6126    return skipCall;
6127}
6128
6129// Destroy commandPool along with all of the commandBuffers allocated from that pool
6130VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6131vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6132    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6133    bool commandBufferComplete = false;
6134    bool skipCall = false;
6135    loader_platform_thread_lock_mutex(&globalLock);
6136#if MTMERGESOURCE
6137    // Verify that command buffers in pool are complete (not in-flight)
6138    // MTMTODO : Merge this with code below (separate *NotInUse() call)
6139    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6140         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
6141        commandBufferComplete = VK_FALSE;
6142        skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
6143        if (VK_FALSE == commandBufferComplete) {
6144            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6145                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6146                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
6147                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
6148                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
6149        }
6150    }
6151#endif
6152    // Verify that no command buffers allocated from this pool are still in-flight before erasing pool state
6153    VkBool32 poolCBInUse = validateCommandBuffersNotInUse(dev_data, commandPool);
6154    if ((VK_FALSE == poolCBInUse) && (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end())) {
6155        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6156             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
6157            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
6158            delete (*del_cb).second;                  // delete CB info structure
6159            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
6160            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
6161                poolCb); // Remove CB reference from commandPoolMap's list
6162        }
6163        dev_data->commandPoolMap.erase(commandPool);
6164    }
6165
6166    loader_platform_thread_unlock_mutex(&globalLock);
6167
6168    if (VK_TRUE == poolCBInUse) return;
6169
6170    if (!skipCall)
6171        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
6172#if MTMERGESOURCE
6173    loader_platform_thread_lock_mutex(&globalLock);
6174    auto item = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6175    // Remove command buffers from command buffer map
6176    while (item != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6177        auto del_item = item++;
6178        delete_cmd_buf_info(dev_data, commandPool, *del_item);
6179    }
6180    dev_data->commandPoolMap.erase(commandPool);
6181    loader_platform_thread_unlock_mutex(&globalLock);
6182#endif
6183}
6184
6185VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6186vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6187    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6188    bool commandBufferComplete = false;
6189    bool skipCall = false;
6190    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6191#if MTMERGESOURCE
6192    // MTMTODO : Merge this with *NotInUse() call below
6193    loader_platform_thread_lock_mutex(&globalLock);
6194    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6195    // Verify that CB's in pool are complete (not in-flight)
6196    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6197        skipCall |= checkCBCompleted(dev_data, (*it), &commandBufferComplete);
6198        if (!commandBufferComplete) {
6199            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6200                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6201                                "Resetting CB %p before it has completed. You must check that the CB "
6202                                "has completed before calling vkResetCommandPool().",
6203                                (*it));
        } else {
            // Clear memory references at this point.
            clear_cmd_buf_and_mem_references(dev_data, (*it));
        }
        ++it;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
            resetCB(dev_data, (*it));
            ++it;
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
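
// Illustrative application-side sketch (an assumption, not part of this layer):
// waiting on the fence used with the pool's last submission before resetting
// keeps the in-flight checks above from ever firing. The fence handle and the
// helper name below are hypothetical.
#if 0 // example only, excluded from the build
static void exampleSafeResetCommandPool(VkDevice device, VkCommandPool pool, VkFence lastSubmitFence) {
    // Block until every command buffer guarded by this fence has completed execution
    vkWaitForFences(device, 1, &lastSubmitFence, VK_TRUE, UINT64_MAX);
    // Now no command buffer from the pool can still be in-flight
    vkResetCommandPool(device, pool, 0);
}
#endif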

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
#if MTMERGESOURCE
        // Reset fence state in fenceCreateInfo structure
        // MTMTODO : Merge with code below
        auto fence_item = dev_data->fenceMap.find(pFences[i]);
        if (fence_item != dev_data->fenceMap.end()) {
            // Validate fences in SIGNALED state
            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "Fence %#" PRIxLEAST64 " submitted to vkResetFences in UNSIGNALED STATE", (uint64_t)pFences[i]);
            } else {
                fence_item->second.createInfo.flags =
                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
            }
        }
#endif
        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
    return result;
}
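
// A minimal fence-lifecycle sketch (illustrative only, not part of this layer),
// showing the states the checks above care about: a fence created signaled is
// reset to unsignaled before each submit, so vkResetFences never sees an
// already-unsignaled or in-use fence.
#if 0 // example only, excluded from the build
static void exampleFenceLifecycle(VkDevice device, VkQueue queue, const VkSubmitInfo *pSubmit) {
    VkFenceCreateInfo fci = {};
    fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fci.flags = VK_FENCE_CREATE_SIGNALED_BIT; // start signaled so the first wait succeeds
    VkFence fence = VK_NULL_HANDLE;
    vkCreateFence(device, &fci, NULL, &fence);
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // fence is idle and signaled here
    vkResetFences(device, 1, &fence);                        // back to unsignaled before reuse
    vkQueueSubmit(queue, 1, pSubmit, fence);                 // queue signals the fence on completion
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
    vkDestroyFence(device, fence, NULL);
}
#endif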

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                loader_platform_thread_lock_mutex(&globalLock);
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
                loader_platform_thread_unlock_mutex(&globalLock);
            }
        }
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->frameBufferMap.erase(framebuffer);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->renderPassMap.erase(renderPass);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
#endif
        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so fix that if/when that data needs to remain valid
        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
        dev_data->bufferMap[*pBuffer].in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
#if MTMERGESOURCE
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
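
// Sketch of a buffer-view creation that satisfies the usage check above
// (illustrative only; error handling omitted). The format, size, and sharing
// mode choices here are arbitrary assumptions.
#if 0 // example only, excluded from the build
static VkBufferView exampleCreateTexelBufferView(VkDevice device, VkDeviceSize size) {
    VkBufferCreateInfo bci = {};
    bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bci.size = size;
    bci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // the usage bit the check requires
    bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkBuffer buffer = VK_NULL_HANDLE;
    vkCreateBuffer(device, &bci, NULL, &buffer);

    VkBufferViewCreateInfo bvci = {};
    bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
    bvci.buffer = buffer;
    bvci.format = VK_FORMAT_R32G32B32A32_SFLOAT;
    bvci.offset = 0;
    bvci.range = VK_WHOLE_SIZE;
    VkBufferView view = VK_NULL_HANDLE;
    vkCreateBufferView(device, &bvci, NULL, &view);
    return view;
}
#endif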

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
#endif
        IMAGE_LAYOUT_NODE image_node;
        image_node.layout = pCreateInfo->initialLayout;
        image_node.format = pCreateInfo->format;
        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects globalLock to be held by caller */

    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects globalLock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}
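
// Worked example of the resolution above (illustrative only): for an image
// created with mipLevels = 10 and arrayLayers = 6, the range below resolves to
// levelCount = 10 - 2 = 8 and layerCount = 6 - 0 = 6 in the layer's state.
#if 0 // example only, excluded from the build
static VkImageSubresourceRange exampleRemainingRange() {
    VkImageSubresourceRange range = {};
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseMipLevel = 2;
    range.levelCount = VK_REMAINING_MIP_LEVELS;   // resolved to 8 internally
    range.baseArrayLayer = 0;
    range.layerCount = VK_REMAINING_ARRAY_LAYERS; // resolved to 6 internally
    return range;
}
#endif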

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
        dev_data->imageViewMap[*pView] = localCI;
#if MTMERGESOURCE
        // Validate that the image has the correct usage flags set
        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                   VK_FALSE, "vkCreateImageView()",
                                   "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
#if MTMERGESOURCE
        memset(pFN, 0, sizeof(MT_FENCE_INFO));
        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            pFN->firstTimeFlag = VK_TRUE;
        }
#endif
        pFN->in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    VkBool32 skipCall = VK_FALSE;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);

    for (i = 0; i < count; i++) {
        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                         VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    VkBool32 skipCall = VK_FALSE;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        }
        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
        // g++ does not like reserve with size 0
        if (pCreateInfo->bindingCount)
            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
        uint32_t totalCount = 0;
        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
            // emplace() reports (via .second == false) when this binding number is already present;
            // on success it has already stored the index, so no further assignment is needed
            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
                            DRAWSTATE_INVALID_LAYOUT, "DS", "duplicate binding number in "
                                                            "VkDescriptorSetLayoutBinding"))
                    return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            totalCount += pCreateInfo->pBindings[i].descriptorCount;
            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
            }
        }
        pNewNode->layout = *pSetLayout;
        pNewNode->startIndex = 0;
        if (totalCount > 0) {
            pNewNode->descriptorTypes.resize(totalCount);
            pNewNode->stageFlags.resize(totalCount);
            uint32_t offset = 0;
            uint32_t j = 0;
            VkDescriptorType dType;
            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
                dType = pCreateInfo->pBindings[i].descriptorType;
                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
                    pNewNode->descriptorTypes[offset + j] = dType;
                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        pNewNode->dynamicDescriptorCount++;
                    }
                }
                offset += j;
            }
            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
        } else { // no descriptors
            pNewNode->endIndex = 0;
        }
        // Add the new layout node to the descriptor set layout map
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
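
// Sketch of a layout that passes the duplicate-binding check above
// (illustrative only; binding numbers, types, and stages are arbitrary choices).
#if 0 // example only, excluded from the build
static VkDescriptorSetLayout exampleCreateSetLayout(VkDevice device) {
    VkDescriptorSetLayoutBinding bindings[2] = {};
    bindings[0].binding = 0; // binding numbers must be unique within one layout
    bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    bindings[0].descriptorCount = 1;
    bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    bindings[1].binding = 1; // distinct from binding 0, so the check passes
    bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    bindings[1].descriptorCount = 1;
    bindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;

    VkDescriptorSetLayoutCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    ci.bindingCount = 2;
    ci.pBindings = bindings;
    VkDescriptorSetLayout layout = VK_NULL_HANDLE;
    vkCreateDescriptorSetLayout(device, &ci, NULL, &layout);
    return layout;
}
#endif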

static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                     const char *caller_name) {
    bool skipCall = false;
    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                 "exceeds this device's maxPushConstantsSize of %u.",
                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
    }
    return skipCall;
}
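
// A range that passes both push-constant checks used below: offset + size must
// stay within maxPushConstantsSize, and the size must be non-zero and a
// multiple of 4 (illustrative only; 64 bytes fits the spec-guaranteed minimum
// limit of 128 bytes).
#if 0 // example only, excluded from the build
static VkPushConstantRange examplePushConstantRange() {
    VkPushConstantRange range = {};
    range.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    range.offset = 0; // offset + size == 64 <= 128
    range.size = 64;  // non-zero and a multiple of 4
    return range;
}
#endif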

VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    uint32_t i = 0;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        i, pCreateInfo->pPushConstantRanges[i].size);
        }
        // TODO : Add warning if ranges overlap
    }
    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        // TODOSC : Merge capture of the setLayouts per pipeline
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
        }
        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                       VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Log the pool creation, then track this pool in the descriptor pool map
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            loader_platform_thread_lock_mutex(&globalLock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
            loader_platform_thread_unlock_mutex(&globalLock);
        }
    } else {
        // TODO : Is any clean-up needed if pool creation fails?
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    // Verify that requested descriptorSets are available in pool
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
    if (!pPoolNode) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                            (uint64_t)pAllocateInfo->descriptorPool);
    } else { // Make sure pool has all the available descriptors before calling down chain
        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
                                                             pAllocateInfo->pSetLayouts);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
        if (pPoolNode) {
            if (pAllocateInfo->descriptorSetCount == 0) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
                        "AllocateDescriptorSets called with 0 count");
            }
            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
                        (uint64_t)pDescriptorSets[i]);
                // Create new set node and add to head of pool nodes
                SET_NODE *pNewNode = new SET_NODE;
                if (NULL == pNewNode) {
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
                        loader_platform_thread_unlock_mutex(&globalLock);
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                } else {
                    // TODO : Pool should store a total count of each type of Descriptor available
                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. On reset/free, bump the count back up.
                    // Insert set at head of Set LL for this pool
                    pNewNode->pNext = pPoolNode->pSets;
                    pNewNode->in_use.store(0);
                    pPoolNode->pSets = pNewNode;
                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
                    if (NULL == pLayout) {
                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                                    "Unable to find set layout node for layout %#" PRIxLEAST64
                                    " specified in vkAllocateDescriptorSets() call",
                                    (uint64_t)pAllocateInfo->pSetLayouts[i])) {
                            loader_platform_thread_unlock_mutex(&globalLock);
                            return VK_ERROR_VALIDATION_FAILED_EXT;
                        }
                    }
                    pNewNode->pLayout = pLayout;
                    pNewNode->pool = pAllocateInfo->descriptorPool;
                    pNewNode->set = pDescriptorSets[i];
                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
                    if (pNewNode->descriptorCount) {
                        pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
                    }
                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
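
// Sketch of an allocation that passes the pool-availability check above
// (illustrative only). The pool's sizes must cover every descriptor in the
// requested layouts; this assumes `layout` holds exactly one UNIFORM_BUFFER
// descriptor.
#if 0 // example only, excluded from the build
static VkDescriptorSet exampleAllocateFromPool(VkDevice device, VkDescriptorSetLayout layout) {
    VkDescriptorPoolSize poolSize = {};
    poolSize.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    poolSize.descriptorCount = 1; // must cover the descriptors in the layout
    VkDescriptorPoolCreateInfo pci = {};
    pci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    pci.maxSets = 1;
    pci.poolSizeCount = 1;
    pci.pPoolSizes = &poolSize;
    VkDescriptorPool pool = VK_NULL_HANDLE;
    vkCreateDescriptorPool(device, &pci, NULL, &pool);

    VkDescriptorSetAllocateInfo ai = {};
    ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    ai.descriptorPool = pool;
    ai.descriptorSetCount = 1;
    ai.pSetLayouts = &layout;
    VkDescriptorSet set = VK_NULL_HANDLE;
    vkAllocateDescriptorSets(device, &ai, &set);
    return set;
}
#endif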

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < count; ++i)
        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);

        // Update available descriptor sets in pool
        pPoolNode->availableSets += count;

        // For each freed descriptor add it back into the pool as available
        for (uint32_t i = 0; i < count; ++i) {
            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
            invalidateBoundCmdBuffers(dev_data, pSet);
            LAYOUT_NODE *pLayout = pSet->pLayout;
            uint32_t typeIndex = 0, poolSizeCount = 0;
            for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    // TODO : Any other clean-up or book-keeping to do here?
    return result;
}
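
// Freeing individual sets is legal only for pools created with the flag the
// check above inspects; a sketch (illustrative only), assuming `freeablePool`
// was created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT:
#if 0 // example only, excluded from the build
static void exampleFreeOneSet(VkDevice device, VkDescriptorPool freeablePool, VkDescriptorSet set) {
    // Without the FREE_DESCRIPTOR_SET_BIT pool flag, the only way to recycle
    // sets is vkResetDescriptorPool on the whole pool
    vkFreeDescriptorSets(device, freeablePool, 1, &set);
}
#endif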

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // dsUpdate returns VK_TRUE only if a bailout error occurs, so we only call down the chain when it returns VK_FALSE
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // MTMTODO : Merge this in with existing update code below and handle descriptor copies case
    uint32_t j = 0;
    for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
        if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].images.push_back(
                    pDescriptorWrites[i].pImageInfo[j].imageView);
            }
        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
                    dev_data->bufferViewMap[pDescriptorWrites[i].pTexelBufferView[j]].buffer);
            }
        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                   pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
                    pDescriptorWrites[i].pBufferInfo[j].buffer);
            }
        }
    }
#endif
    VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!rtn) {
        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                              pDescriptorCopies);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
        if (cp_it != dev_data->commandPoolMap.end()) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
#if MTMERGESOURCE
        printCBList(dev_data, device);
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    // Validate command buffer level
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
#if MTMERGESOURCE
        bool commandBufferComplete = false;
        // MTMTODO : Merge this with code below
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);

        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
                                "You must check CB flag before this call.",
                                commandBuffer);
        }
#endif
        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
                            reinterpret_cast<void *>(commandBuffer));
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
                            reinterpret_cast<void *>(commandBuffer));
                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
                                                  "valid framebuffer parameter is specified.",
                                            reinterpret_cast<void *>(commandBuffer));
                    } else {
                        string errorString = "";
                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
                        if (fbNode != dev_data->frameBufferMap.end()) {
                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
                                // The renderPass the framebuffer was created with must be
                                // compatible with the local renderPass
                                skipCall |=
                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
                            }
                            // Connect this framebuffer to this cmdBuffer
                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
                        }
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
                     dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is disabled or the device does not "
                                        "support precise occlusion queries.",
                                        reinterpret_cast<void *>(commandBuffer));
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must have a subpass index (%d) "
                                            "that is less than the number of subpasses (%d).",
                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
                    }
                }
            }
        }
        if (CB_RECORDING == pCB->state) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        (uint64_t)commandBuffer);
        } else if (CB_RECORDED == pCB->state) {
            VkCommandPool cmdPool = pCB->createInfo.commandPool;
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        pCB->state = CB_RECORDING;
        pCB->beginInfo = *pBeginInfo;
        if (pCB->beginInfo.pInheritanceInfo) {
            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
        }
    } else {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "In vkBeginCommandBuffer(): unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    return result;
}
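
// Sketch of a secondary command buffer begin that satisfies the checks above
// (illustrative only): inheritance info is provided, a renderPass is given for
// RENDER_PASS_CONTINUE_BIT, the subpass index is assumed in range, and a
// framebuffer is named to avoid the performance warning.
#if 0 // example only, excluded from the build
static void exampleBeginSecondary(VkCommandBuffer secondaryCB, VkRenderPass renderPass, VkFramebuffer framebuffer) {
    VkCommandBufferInheritanceInfo inherit = {};
    inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inherit.renderPass = renderPass;   // required with RENDER_PASS_CONTINUE_BIT
    inherit.subpass = 0;               // must be < the render pass's subpassCount
    inherit.framebuffer = framebuffer; // optional, but specifying it can help performance
    VkCommandBufferBeginInfo begin = {};
    begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    begin.pInheritanceInfo = &inherit;
    vkBeginCommandBuffer(secondaryCB, &begin);
}
#endif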

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
    VkBool32 skipCall = VK_FALSE;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state != CB_RECORDING) {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
        }
        for (auto query : pCB->activeQueries) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUERY, "DS",
                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
                                (uint64_t)(query.pool), query.index);
        }
    }
    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
        loader_platform_thread_lock_mutex(&globalLock);
        if (VK_SUCCESS == result) {
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
            printCB(dev_data, commandBuffer);
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    bool commandBufferComplete = false;
    // Verify that CB is complete (not in-flight)
    skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
    if (!commandBufferComplete) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                            "Resetting CB %p before it has completed. You must check CB "
                            "flag before calling vkResetCommandBuffer().",
                            commandBuffer);
    }
    // Clear memory references at this point.
    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
    }
    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
                            reinterpret_cast<uint64_t>(commandBuffer));
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall != VK_FALSE)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        resetCB(dev_data, commandBuffer);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
7134#if MTMERGESOURCE
7135// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
7136//    need to account for that mem now having binding to given commandBuffer
7137#endif
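// Each command buffer tracks its most recent binding per VkPipelineBindPoint in
// lastBound[], so graphics and compute pipeline state are validated
// independently. Binding a compute pipeline inside an active render pass is
// flagged below, since dispatches are not allowed within a render pass instance.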
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
        }

        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
        if (pPN) {
            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
            set_cb_pso_status(pCB, pPN);
            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
        } else {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

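// The vkCmdSet*() entry points below share one pattern: record the command via
// addCmd() for in-order validation, then set the matching CBSTATUS_*_SET bit.
// Draw-time validation later checks those bits against the dynamic states the
// bound pipeline declared, catching draws issued with unset dynamic state.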
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        pCB->status |= CBSTATUS_VIEWPORT_SET;
        pCB->viewports.resize(viewportCount);
        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        pCB->status |= CBSTATUS_SCISSOR_SET;
        pCB->scissors.resize(scissorCount);
        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
                                                         depthBiasSlopeFactor);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        pCB->status |= CBSTATUS_BLEND_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
}

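// vkCmdBindDescriptorSets() below performs the bulk of descriptor validation at
// bind time: each set must exist and have been updated, its layout must be
// compatible with the overlapping slot of the pipeline layout, dynamic offsets
// must match the number of dynamic descriptors one-for-one and satisfy the
// device alignment limits, and previously bound sets that this bind disturbs
// are invalidated.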
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                        const uint32_t *pDynamicOffsets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // MTMTODO : Merge this with code below
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // MTMTODO : activeDescriptorSets should be merged with lastBound.boundDescriptorSets
        std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
        if (activeDescriptorSets.size() < (setCount + firstSet)) {
            activeDescriptorSets.resize(setCount + firstSet);
        }
        for (uint32_t i = 0; i < setCount; ++i) {
            activeDescriptorSets[i + firstSet] = pDescriptorSets[i];
        }
    }
    // TODO : Somewhere need to verify that all textures referenced by shaders in DS are in some type of *SHADER_READ* state
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            for (uint32_t i = 0; i < setCount; i++) {
                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
                if (pSet) {
                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
                    pSet->boundCmdBuffers.insert(commandBuffer);
                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                            "DS %#" PRIxLEAST64
                                            " bound but it was never updated. You may want to either update it or not bind it.",
                                            (uint64_t)pDescriptorSets[i]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
                                            "pipelineLayout due to: %s",
                                            i, errorString.c_str());
                    }
                    if (pSet->pLayout->dynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
                            skipCall |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (%#" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else { // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
                                        0) {
                                        skipCall |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
                                        0) {
                                        skipCall |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                }
                            }
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
                        }
                    }
                } else {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
                                        (uint64_t)pDescriptorSets[i]);
                }
            }
            skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
            if (firstSet > 0) { // Check set #s below the first bound set
                for (uint32_t i = 0; i < firstSet; ++i) {
                    if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                        !verify_set_layout_compatibility(
                            dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
                            errorString)) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                            "DescriptorSet DS %#" PRIxLEAST64
                            " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                    }
                }
            }
            // Check if newly last bound set invalidates any remaining bound sets
            if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                if (oldFinalBoundSet &&
                    !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
                                                     errorString)) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
                                DRAWSTATE_NONE, "DS", "DescriptorSet DS %#" PRIxLEAST64
                                                      " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
                                                      " newly bound as set #%u so set #%u and any subsequent sets were "
                                                      "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
                                (uint64_t)oldFinalBoundSet, lastSetIndex,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                lastSetIndex + 1, (uint64_t)layout);
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                }
            }
            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                                    "is %u. It should exactly match the number of dynamic descriptors.",
                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
            }
            // Save dynamicOffsets bound to this CB
            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.push_back(pDynamicOffsets[i]);
            }
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}

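// Illustrative application-side call (not part of this layer) showing the
// dynamic-offset contract validated above, assuming a set layout whose only
// dynamic binding is a single VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
//     uint32_t dynamicOffset = 256; // must be a multiple of the device's
//                                   // minUniformBufferOffsetAlignment limit
//     vkCmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
//                             pipelineLayout, 0 /*firstSet*/, 1, &descriptorSet,
//                             1 /*dynamicOffsetCount*/, &dynamicOffset);
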
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                offset, string_VkIndexType(indexType));
        }
        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}

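// Note: the index-type switch above gives the required offset alignment (2 bytes
// for VK_INDEX_TYPE_UINT16, 4 bytes for VK_INDEX_TYPE_UINT32); any offset that
// is not a multiple of the index size is reported as
// DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR.
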
void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

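// currentDrawData accumulates the vertex buffers bound so far; each draw that
// passes validation snapshots it into pCB->drawData (see
// updateResourceTrackingOnDraw above), so later checks can tell which buffers
// each recorded draw actually referenced.
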
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
                                                                  const VkDeviceSize *pOffsets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    for (uint32_t i = 0; i < bindingCount; ++i) {
        VkDeviceMemory mem;
        skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
                                                VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
        if (cb_data != dev_data->commandBufferMap.end()) {
            std::function<VkBool32()> function =
                [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
            cb_data->second->validate_functions.push_back(function);
        }
    }
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
    } else {
        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

#if MTMERGESOURCE
/* expects globalLock to be held by caller */
bool markStoreImagesAndBuffersAsWritten(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    auto cb_data = my_data->commandBufferMap.find(commandBuffer);
    if (cb_data == my_data->commandBufferMap.end())
        return skip_call;
    std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
    for (auto descriptorSet : activeDescriptorSets) {
        auto ds_data = my_data->descriptorSetMap.find(descriptorSet);
        if (ds_data == my_data->descriptorSetMap.end())
            continue;
        std::vector<VkImageView> images = ds_data->second.images;
        std::vector<VkBuffer> buffers = ds_data->second.buffers;
        for (auto imageView : images) {
            auto iv_data = my_data->imageViewMap.find(imageView);
            if (iv_data == my_data->imageViewMap.end())
                continue;
            VkImage image = iv_data->second.image;
            VkDeviceMemory mem;
            skip_call |=
                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
            std::function<VkBool32()> function = [=]() {
                set_memory_valid(my_data, mem, true, image);
                return VK_FALSE;
            };
            cb_data->second->validate_functions.push_back(function);
        }
        for (auto buffer : buffers) {
            VkDeviceMemory mem;
            skip_call |=
                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
            std::function<VkBool32()> function = [=]() {
                set_memory_valid(my_data, mem, true);
                return VK_FALSE;
            };
            cb_data->second->validate_functions.push_back(function);
        }
    }
    return skip_call;
}
#endif

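// The validate_functions queued above (and by the vkCmdBind*/vkCmdCopy* entry
// points) are deferred checks: each lambda captures the memory object a command
// will touch and runs when the command buffer is submitted, once it is finally
// known whether that memory holds valid data or is being freshly written.
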
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                                     uint32_t firstVertex, uint32_t firstInstance) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // MTMTODO : merge with code below
    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        pCB->drawCount[DRAW]++;
        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (VK_FALSE == skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}

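// The indexed and indirect draw entry points below repeat the vkCmdDraw()
// sequence: record the command, bump the per-type draw count, run
// validate_draw_state() (passing VK_TRUE for indexed draws so the bound index
// buffer is checked), snapshot resource tracking, and confirm the draw is
// recorded inside a render pass.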
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                                            uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // MTMTODO : merge with code below
    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
        pCB->drawCount[DRAW_INDEXED]++;
        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (VK_FALSE == skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
                                                        firstInstance);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    // MTMTODO : merge with code below
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
        pCB->drawCount[DRAW_INDIRECT]++;
        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (VK_FALSE == skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    // MTMTODO : merge with code below
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (VK_FALSE == skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
    // Validate that SRC & DST buffers have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}

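// The two helpers below walk every array layer in a copy's subresource range.
// The first use of a subresource in a command buffer simply records the
// expected layout; subsequent uses must match what was recorded, and the final
// check steers applications toward TRANSFER_SRC/DST_OPTIMAL over
// VK_IMAGE_LAYOUT_GENERAL.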
VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
                                 VkImageLayout srcImageLayout) {
    VkBool32 skip_call = VK_FALSE;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, srcImage, sub, node)) {
            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                                                                        "and doesn't match the current layout %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(srcImageLayout));
        }
    }
    return skip_call;
}

VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
                               VkImageLayout destImageLayout) {
    VkBool32 skip_call = VK_FALSE;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, destImage, sub, node)) {
            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout is %s "
                                                                        "and doesn't match the current layout %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(destImageLayout));
        }
    }
    return skip_call;
}

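// Illustrative application-side barrier (not part of this layer) that moves an
// image into TRANSFER_SRC_OPTIMAL so the source-layout checks above pass
// without the LAYOUT_GENERAL performance warning:
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = srcImage;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
//                          VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL,
//                          1, &barrier);
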
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    // Validate that src & dst images have correct usage flags set
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage);
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions);
}

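// As with vkCmdCopyImage() above, the remaining transfer commands
// (vkCmdBlitImage, vkCmdCopyBufferToImage, vkCmdCopyImageToBuffer) must be
// recorded outside of a render pass instance; insideRenderPass() flags any that
// appear between vkCmdBeginRenderPass and vkCmdEndRenderPass.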
7962VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7963vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7964               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7965    VkBool32 skipCall = VK_FALSE;
7966    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7967    loader_platform_thread_lock_mutex(&globalLock);
7968#if MTMERGESOURCE
7969    VkDeviceMemory mem;
7970    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7971    // Validate that src & dst images have correct usage flags set
7972    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7973    if (cb_data != dev_data->commandBufferMap.end()) {
7974        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
7975        cb_data->second->validate_functions.push_back(function);
7976    }
7977    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7978    skipCall |=
7979        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7980    if (cb_data != dev_data->commandBufferMap.end()) {
7981        std::function<VkBool32()> function = [=]() {
7982            set_memory_valid(dev_data, mem, true, dstImage);
7983            return VK_FALSE;
7984        };
7985        cb_data->second->validate_functions.push_back(function);
7986    }
7987    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7988    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7989                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7990    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7991                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7992#endif
7993    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7994    if (pCB) {
7995        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
7996        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
7997    }
7998    loader_platform_thread_unlock_mutex(&globalLock);
7999    if (VK_FALSE == skipCall)
8000        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8001                                                      regionCount, pRegions, filter);
8002}
8003
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    // Validate that src buff & dst image have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}
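// Illustrative sketch (not from this file): the usage-flag checks above fire when an
// application records a transfer against a resource created without the matching usage
// bit, e.g. a staging copy whose source buffer omitted TRANSFER_SRC:
//
//     VkBufferCreateInfo ci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
//     ci.size = 4096;
//     ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; // missing VK_BUFFER_USAGE_TRANSFER_SRC_BIT
//     vkCreateBuffer(device, &ci, nullptr, &buf);
//     ...
//     vkCmdCopyBufferToImage(cb, buf, img, layout, 1, &region); // flagged by validate_buffer_usage_flags()
//
// (device / buf / cb / img / layout / region are assumed application-side values.)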

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    // Validate that dst buff & src image have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
                                                                 const VkClearRect *pRects) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && rectCount &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // TODO : commandBuffer should be srcObj
            // There are times when an app needs to use ClearAttachments (generally when reusing a buffer inside a render pass).
            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
            // call CmdClearAttachments. Otherwise this seems more like a performance warning.
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                (uint64_t)(commandBuffer));
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];

        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                VkBool32 found = VK_FALSE;
                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
                        found = VK_TRUE;
                        break;
                    }
                }
                if (VK_FALSE == found) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
                        attachment->colorAttachment, pCB->activeSubpass);
                }
            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
                    (pSD->pDepthStencilAttachment->attachment ==
                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass

                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
                        "in active subpass %d",
                        attachment->colorAttachment,
                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
                        pCB->activeSubpass);
                }
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
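// Illustrative sketch: the performance warning above points at VK_ATTACHMENT_LOAD_OP_CLEAR
// as the preferred way to clear an entire attachment before any draw. A render pass set
// up that way clears at subpass load, with the clear value supplied at begin time:
//
//     VkAttachmentDescription att = {};
//     att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;       // instead of an up-front vkCmdClearAttachments()
//     att.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
//     ...
//     VkClearValue clear = {};                        // color or depth/stencil clear value
//     VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
//     rpbi.clearValueCount = 1;
//     rpbi.pClearValues = &clear;
//     vkCmdBeginRenderPass(cb, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
//
// (cb and the elided render pass / framebuffer setup are assumed application values.)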

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                            const VkImageSubresourceRange *pRanges) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
                                                                   pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    VkDeviceMemory mem;
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}
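// vkCmdSetEvent()/vkCmdResetEvent() do not know which queue the command buffer will be
// submitted to, so the event->stageMask update is packaged as a bound callable and run
// per-queue at submit time via pCB->eventUpdates. A reset is simply a set with an empty
// mask; a sketch of the binding used above:
//
//     std::function<bool(VkQueue)> eventUpdate =
//         std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
//     pCB->eventUpdates.push_back(eventUpdate);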

VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    VkBool32 skip = VK_FALSE;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where
        // possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub,
                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
                                                                                    "when current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}
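// Illustrative sketch: the per-(mip, layer) tracking above is what catches a barrier
// whose oldLayout disagrees with the layout most recently recorded for that subresource.
// A typical first transition has no history, so it is simply recorded:
//
//     VkImageMemoryBarrier imb = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     imb.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;              // first use: no prior layout
//     imb.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     imb.image = image;
//     imb.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     // A later barrier on the same subresource must use TRANSFER_DST_OPTIMAL as its
//     // oldLayout, or TransitionImageLayouts() logs DRAWSTATE_INVALID_IMAGE_LAYOUT.
//
// (image is an assumed application handle.)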

// Print readable FlagBits in FlagMask
std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (uint32_t i = 0; i < 32; i++) {
            if (accessMask & (1u << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}
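// Example (illustrative):
//     string_VkAccessFlags(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT)
// returns "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]", and
// string_VkAccessFlags(0) returns "[None]".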

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
    VkBool32 skip_call = VK_FALSE;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
                                                                  "%s when layout is %s, unless the app has previously added a "
                                                                  "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
                                                                  "layout is %s, unless the app has previously added a barrier for "
                                                                  "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}
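// Worked example (illustrative): for a barrier using VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
// the caller below passes required_bit = VK_ACCESS_TRANSFER_WRITE_BIT, optional_bits = 0:
//   - accessMask = VK_ACCESS_TRANSFER_WRITE_BIT                             -> OK
//   - accessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT -> warning (extra bits)
//   - accessMask = VK_ACCESS_SHADER_READ_BIT                                -> error (required bit missing)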

VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                     const VkImageLayout &layout, const char *type) {
    VkBool32 skip_call = VK_FALSE;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}
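// Illustrative sketch: a barrier moving an image from color-attachment use to shader
// sampling satisfies both lookups above with:
//
//     VkImageMemoryBarrier imb = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     imb.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     imb.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     imb.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; // required bit for the old layout
//     imb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;            // one of the bits accepted for the new layout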

VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                          const VkImageMemoryBarrier *pImageMemBarriers) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = dev_data->imageMap.find(mem_barrier->image);
        if (image_data != dev_data->imageMap.end()) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than or equal to the " PRINTF_SIZE_T_SPECIFIER
                                         " queueFamilies created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
            }
            auto image_data = dev_data->imageMap.find(mem_barrier->image);
            VkFormat format;
            uint32_t arrayLayers, mipLevels;
            bool imageFound = false;
            if (image_data != dev_data->imageMap.end()) {
                format = image_data->second.createInfo.format;
                arrayLayers = image_data->second.createInfo.arrayLayers;
                mipLevels = image_data->second.createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
                        format = swapchain_data->second->createInfo.imageFormat;
                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                if (vk_format_is_depth_and_stencil(format) &&
                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Image is a depth and stencil format and thus must "
                                         "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
                                         "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                         funcName);
                }
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource's sum of baseArrayLayer (%d) and layerCount (%d) must be less "
                                         "than or equal to the total number of layers (%d).",
                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource's sum of baseMipLevel (%d) and levelCount (%d) must be less "
                                         "than or equal to the total number of levels (%d).",
                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
                                         mem_barrier->subresourceRange.levelCount, mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                 dev_data->physDevProperties.queue_family_properties.size());
        }

        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
        if (buffer_data != dev_data->bufferMap.end()) {
            // Only dereference the map entry once the buffer is known to exist
            uint64_t buffer_size =
                buffer_data->second.create_info ? reinterpret_cast<uint64_t &>(buffer_data->second.create_info->size) : 0;
            if (mem_barrier->offset >= buffer_size) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
                                                             " which is not less than total size %" PRIu64 ".",
                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
                                     " whose sum is greater than total size %" PRIu64 ".",
                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
            }
        }
    }
    return skip_call;
}
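// Illustrative sketch: under VK_SHARING_MODE_EXCLUSIVE the queue-family checks above
// accept either "no ownership transfer" or a fully specified transfer, e.g. handing a
// buffer from a transfer queue family to a graphics queue family:
//
//     VkBufferMemoryBarrier bmb = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER};
//     bmb.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     bmb.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
//     bmb.srcQueueFamilyIndex = transferFamilyIndex;  // both valid families, or both
//     bmb.dstQueueFamilyIndex = graphicsFamilyIndex;  // VK_QUEUE_FAMILY_IGNORED
//     bmb.buffer = buf;
//     bmb.offset = 0;
//     bmb.size = VK_WHOLE_SIZE;                       // sidesteps the offset+size bound check
//
// (transferFamilyIndex / graphicsFamilyIndex / buf are assumed application values.)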

bool validateEventStageMask(VkQueue queue, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(pEvents[i]);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = dev_data->eventMap.find(pEvents[i]);
            if (global_event_data == dev_data->eventMap.end()) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(pEvents[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(pEvents[i]));
            } else {
                stageMask |= global_event_data->second.stageMask;
            }
        }
    }
    if (sourceStageMask != stageMask) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_FENCE, "DS",
                    "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
                    sourceStageMask);
    }
    return skip_call;
}
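// Worked example (illustrative): if a command buffer recorded
//     vkCmdSetEvent(cb, evtA, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdSetEvent(cb, evtB, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
//     vkCmdWaitEvents(cb, 2, evts, srcStageMask, ...);
// then at submit time the check above requires srcStageMask ==
// VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT (plus
// VK_PIPELINE_STAGE_HOST_BIT for any event last set from the host with vkSetEvent()).
// (cb / evtA / evtB / evts are assumed application values.)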

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < eventCount; ++i) {
            pCB->waitedEvents.push_back(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, eventCount, pEvents, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
                        (uint64_t)(queryPool), slot);
        } else {
            pCB->activeQueries.erase(query);
        }
        pCB->queryToStateMap[query] = 1;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}
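// Query lifecycle as tracked above (illustrative summary): vkCmdBeginQuery() inserts the
// {queryPool, slot} pair into activeQueries/startedQueries; vkCmdEndQuery() must find it
// in activeQueries or DRAWSTATE_INVALID_QUERY fires, and marks queryToStateMap[query] = 1.
// vkCmdResetQueryPool() below returns a slot to 0, and vkCmdCopyQueryPoolResults()
// refuses to copy from slots still at 0 (never ended since their last reset).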

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            pCB->queryToStateMap[query] = 0;
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
    // Validate that DST buffer has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            if (!pCB->queryToStateMap[query]) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
                                    (uint64_t)(queryPool), firstQuery + i);
            }
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                                                                 dstOffset, stride, flags);
}
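
// Query availability tracking across the query commands above: vkCmdResetQueryPool sets
// queryToStateMap[query] to 0, vkCmdEndQuery and vkCmdWriteTimestamp (below) set it to 1,
// and vkCmdCopyQueryPoolResults reports an error for any query whose state is still 0.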

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                                              const void *pValues) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
    }
    // TODO : Add warning if push constant update doesn't align with range
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}
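
/* Illustrative application-side usage (not part of the layer), assuming a pipeline layout
 * 'pipelineLayout' created with a 16-byte push-constant range for the vertex stage:
 *
 *     const float color[4] = {1.0f, 0.0f, 0.0f, 1.0f};
 *     vkCmdPushConstants(cmd, pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(color), color);
 *
 * The handler above only checks offset + size against maxPushConstantsSize; alignment with
 * the declared ranges is still a TODO.
 */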

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->queryToStateMap[query] = 1;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    if (VK_SUCCESS == result) {
        // Shadow create info and store in map
        VkFramebufferCreateInfo *localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
        if (pCreateInfo->pAttachments) {
            localFBCI->pAttachments = new VkImageView[localFBCI->attachmentCount];
            memcpy((void *)localFBCI->pAttachments, pCreateInfo->pAttachments, localFBCI->attachmentCount * sizeof(VkImageView));
        }
        FRAMEBUFFER_NODE fbNode = {};
        fbNode.createInfo = *localFBCI;
        std::pair<VkFramebuffer, FRAMEBUFFER_NODE> fbPair(*pFramebuffer, fbNode);
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkImageView view = pCreateInfo->pAttachments[i];
            auto view_data = dev_data->imageViewMap.find(view);
            if (view_data == dev_data->imageViewMap.end()) {
                continue;
            }
            MT_FB_ATTACHMENT_INFO fb_info;
            get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        &fb_info.mem);
            fb_info.image = view_data->second.image;
            fbPair.second.attachments.push_back(fb_info);
        }
        dev_data->frameBufferMap.insert(fbPair);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

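// Depth-first search over the subpass DAG: returns VK_TRUE if subpass 'dependent' is
// reachable from subpass 'index' by following 'prev' edges, VK_FALSE otherwise.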
VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                        std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index))
        return VK_FALSE;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return VK_TRUE;
        }
    } else {
        return VK_TRUE;
    }
    return VK_FALSE;
}

VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                               const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
    VkBool32 result = VK_TRUE;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (subpass == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit one still might. If so, warn; if not, flag an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
                // TODO: Verify against Valid Use section of spec
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
                                     subpass, dependent_subpasses[k]);
            } else {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = VK_FALSE;
            }
        }
    }
    return result;
}

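// Walks backward from subpass 'index': returns VK_TRUE if this subpass or one of its
// predecessors writes 'attachment'. Any subpass strictly between the writer and the original
// reader (depth > 0) must list the attachment in pPreserveAttachments, or an error is logged.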
VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                        const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true: subsequent nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return VK_TRUE;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return VK_TRUE;
    }
    VkBool32 result = VK_FALSE;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        VkBool32 has_preserved = VK_FALSE;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = VK_TRUE;
                break;
            }
        }
        if (has_preserved == VK_FALSE) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

// Two half-open ranges [offset, offset + size) overlap iff each one starts before the other ends.
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

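// Example (illustrative): two views of the same image with subresource ranges
//   { baseMipLevel = 0, levelCount = 2, baseArrayLayer = 0, layerCount = 1 } and
//   { baseMipLevel = 1, levelCount = 2, baseArrayLayer = 0, layerCount = 4 }
// overlap: the mip ranges [0,2) and [1,3) intersect, and so do the layer ranges [0,1) and [0,4).

// ValidateDependencies below proceeds in three phases: (1) find attachments that alias one
// another (same view, overlapping subresources of one image, or overlapping memory bindings),
// (2) record, per attachment, which subpasses read and write it (or an alias of it), and
// (3) require an explicit dependency wherever one subpass consumes what another produces,
// then check that read attachments are preserved across intervening subpasses.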
VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
                              const std::vector<DAGNode> &subpass_to_node) {
    VkBool32 skip_call = VK_FALSE;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_data_i = my_data->imageViewMap.find(viewi);
            auto view_data_j = my_data->imageViewMap.find(viewj);
            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
                continue;
            }
            if (view_data_i->second.image == view_data_j->second.image &&
                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
                continue;
            }
            if (image_data_i->second.mem == image_data_j->second.mem &&
                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
                                   image_data_j->second.memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            other_attachment, attachment);
            }
        }
    }
    // For each attachment, record the subpasses that use it.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
    }
    // Wherever a dependency is needed, make sure one exists.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Check implicit dependencies: if this pass reads an attachment, make sure it is preserved
    // in every pass between where it was written and here.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}

VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    VkBool32 skip = VK_FALSE;

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
                }
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
                }
            }
        }
        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
                }
            }
        }
    }
    return skip;
}

VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                       std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    VkBool32 skip_call = VK_FALSE;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        }
        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
        }
        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}
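
// Example (illustrative): a VkSubpassDependency { srcSubpass = 0, dstSubpass = 1 } yields
// subpass_to_node[1].prev = {0} and subpass_to_node[0].next = {1}. Edges are only recorded
// on endpoints that are not VK_SUBPASS_EXTERNAL, and srcSubpass == dstSubpass merely marks
// a self-dependency for that subpass.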


VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkShaderModule *pShaderModule) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skip_call = VK_FALSE;
    if (!shader_is_spirv(pCreateInfo)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
    }

    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return res;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator,
                                                                  VkRenderPass *pRenderPass) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    // Create DAG
    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
    // Validate
    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    if (VK_FALSE != skip_call) {
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        // TODOSC : Merge in tracking of renderpass from shader_checker
        // Shadow create info and store in map
        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
        if (pCreateInfo->pAttachments) {
            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
        }
        if (pCreateInfo->pSubpasses) {
            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));

            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
                const uint32_t attachmentCount = subpass->inputAttachmentCount +
                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];

                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
                subpass->pInputAttachments = attachments;
                attachments += subpass->inputAttachmentCount;

                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                subpass->pColorAttachments = attachments;
                attachments += subpass->colorAttachmentCount;

                if (subpass->pResolveAttachments) {
                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                    subpass->pResolveAttachments = attachments;
                    attachments += subpass->colorAttachmentCount;
                }

                if (subpass->pDepthStencilAttachment) {
                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
                    subpass->pDepthStencilAttachment = attachments;
                    attachments += 1;
                }

                // pPreserveAttachments holds bare uint32_t indices, not VkAttachmentReference
                // structs, so copy only preserveAttachmentCount * sizeof(uint32_t) bytes.
                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
                subpass->pPreserveAttachments = &attachments->attachment;
            }
        }
        if (pCreateInfo->pDependencies) {
            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
        }
        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
#if MTMERGESOURCE
        // MTMTODO : Merge with code from above to eliminate duplication
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
            MT_PASS_ATTACHMENT_INFO pass_info;
            pass_info.load_op = desc.loadOp;
            pass_info.store_op = desc.storeOp;
            pass_info.attachment = i;
            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
        }
        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, true));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
            }
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
            }
        }
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
// Free the renderpass shadow
static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
        return;
    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
        delete[] pRenderPassInfo->pAttachments;
        if (pRenderPassInfo->pSubpasses) {
            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in a block, so just need to
                //  find the first non-null one to delete
                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
                }
            }
            delete[] pRenderPassInfo->pSubpasses;
        }
        delete[] pRenderPassInfo->pDependencies;
        delete pRenderPassInfo;
        delete (*ii).second;
    }
    my_data->renderPassMap.clear();
}

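// Check every framebuffer attachment's tracked layout against the initialLayout declared by
// the render pass; subresources with no tracked layout yet simply adopt the declared one.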
VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto image_data = dev_data->imageViewMap.find(image_view);
        assert(image_data != dev_data->imageViewMap.end());
        const VkImage &image = image_data->second.image;
        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
                                                                    "where the initial layout is %s and the layout of the "
                                                                    "attachment at the start of the render pass is %s. "
                                                                    "The layouts must match.",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

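// Record, per command buffer, the layouts that subpass 'subpass_index' transitions its
// input, color, and depth/stencil attachments into.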
void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
    }
    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
    }
}

VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
    VkBool32 skip_call = VK_FALSE;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
                             cmd_name.c_str());
    }
    return skip_call;
}

void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

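// Example (illustrative): renderArea { offset = {10, 10}, extent = {100, 100} } requires a
// framebuffer of at least 110x110; a smaller framebuffer (or a negative offset) is an error.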
bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
#if MTMERGESOURCE
            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (pass_data != dev_data->renderPassMap.end()) {
                RENDER_PASS_NODE *pRPNode = pass_data->second;
                pRPNode->fb = pRenderPassBegin->framebuffer;
                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                                return VK_FALSE;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                            skipCall |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
                                        pRPNode->attachments[i].attachment, attachment_layout);
                        }
                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                                return VK_FALSE;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                }
            }
#endif
            skipCall |= static_cast<VkBool32>(VerifyRenderAreaBounds(dev_data, pRenderPassBegin));
            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (render_pass_data != dev_data->renderPassMap.end()) {
                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
            }
            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            pCB->activeRenderPass = pRenderPassBegin->renderPass;
            // This is a shallow copy as that is all that is needed for now
            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
            pCB->activeSubpass = 0;
            pCB->activeSubpassContents = contents;
            pCB->framebuffer = pRenderPassBegin->framebuffer;
            // Connect this framebuffer to this cmdBuffer
            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
        } else {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
        loader_platform_thread_lock_mutex(&globalLock);
        // This is a shallow copy as that is all that is needed for now
        dev_data->renderPassBeginInfo = *pRenderPassBegin;
        dev_data->currentSubpass = 0;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
    if (pCB) {
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
        if (pass_data != dev_data->renderPassMap.end()) {
            RENDER_PASS_NODE *pRPNode = pass_data->second;
            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        std::function<VkBool32()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                            return VK_FALSE;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        std::function<VkBool32()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                            return VK_FALSE;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                }
            }
        }
    }
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
    if (pCB) {
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
        pCB->activeRenderPass = 0;
        pCB->activeSubpass = 0;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
}

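// Render pass compatibility checks for vkCmdExecuteCommands: corresponding attachments of
// the primary and secondary render passes must agree in format and sample count, and, for
// multi-subpass render passes, in attachment flags.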
bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
                                 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                   " that is not compatible with the current render pass %" PRIx64 ". "
                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
                   msg);
}
9761
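// Check that two attachment references are compatible: out-of-range references are treated as
// VK_ATTACHMENT_UNUSED, and used attachments must agree in format, sample count, and (for
// multi-subpass render passes) flags. Assumes the caller has already verified that both render
// passes exist in renderPassMap.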
bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                     uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
                                     uint32_t secondaryAttach, bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip_call;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The first is unused while the second is not.");
        return skip_call;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The second is unused while the first is not.");
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different formats.");
    }
    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different samples.");
    }
    if (is_multi &&
        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different flags.");
    }
    return skip_call;
}

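// Compare the input, color, resolve, and depth/stencil attachment references of the given
// subpass in two render passes, flagging any attachment pair that is not compatible.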
bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                  VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
                                                     secondaryPass, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
                                                     secondaryPass, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
                                                     secondaryPass, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
    return skip_call;
}

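// Verify that a secondary command buffer's render pass is compatible with the primary command
// buffer's active render pass: both must be known objects with matching subpass counts, and
// every subpass must use compatible attachments.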
bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
    bool skip_call = false;
    // Early exit if renderPass objects are identical (and therefore compatible)
    if (primaryPass == secondaryPass)
        return skip_call;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)primaryBuffer, (uint64_t)(primaryPass));
        return skip_call;
    }
    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                             " that is not compatible with the current render pass %" PRIx64 ". "
                             "They have a different number of subpasses.",
                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
        return skip_call;
    }
    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
        skip_call |=
            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
    }
    return skip_call;
}

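// Verify that a secondary command buffer's inherited framebuffer, if specified, matches the
// primary command buffer's framebuffer and was created with a render pass compatible with the
// one the secondary buffer inherits.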
bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->framebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
        }
        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
        if (fb_data == dev_data->frameBufferMap.end()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                                                                          "which has invalid framebuffer %" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
    }
    return skip_call;
}

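// Validate query state across a primary/secondary pair: inherited pipeline statistics queries
// must cover all statistic bits of the active query pool, and a secondary buffer may not be
// executed while a query of a type it has started is already active on the primary.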
bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skipCall = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics are being queried so the command "
                        "buffer must have all bits set on the queryPool.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 " of type %d, but a query of that type has been started on "
                        "secondary Cmd Buffer %p.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
        }
    }
    return skipCall;
}

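// vkCmdExecuteCommands: validate that every buffer in pCommandBuffers is a recorded secondary
// command buffer whose render pass, framebuffer, query, and simultaneous-use state is
// compatible with the primary command buffer, then record the execution.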
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                    (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // Secondary CB w/in RenderPass must have *CONTINUE_BIT set
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if it has the continue bit set
                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
                }
                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution starting w/
            // being recorded
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that executing a non-simultaneous secondary cmd buffer forces the primary to be treated as
                    // non-simultaneous as well
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                                          "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
                            "flight and inherited queries not "
                            "supported on this device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}

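// Ensure any image bound to this memory object is in a host-mappable layout; only GENERAL and
// PREINITIALIZED layouts may be mapped.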
VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto mem_data = dev_data->memObjMap.find(mem);
    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
        std::vector<VkImageLayout> layouts;
        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
            for (auto layout : layouts) {
                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
                                                                                         "GENERAL or PREINITIALIZED are supported.",
                                         string_VkImageLayout(layout));
                }
            }
        }
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkBool32 skip_call = VK_FALSE;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
    if (pMemObj) {
        pMemObj->valid = true;
        if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call =
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
        }
    }
    skip_call |= validateMemRange(dev_data, mem, offset, size);
    storeMemRanges(dev_data, mem, offset, size);
#endif
    skip_call |= ValidateMapImageLayouts(device, mem);
    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_FALSE == skip_call) {
        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
#if MTMERGESOURCE
        loader_platform_thread_lock_mutex(&globalLock);
        initializeAndTrackMemory(dev_data, mem, size, ppData);
        loader_platform_thread_unlock_mutex(&globalLock);
#endif
    }
    return result;
}

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= deleteMemRanges(my_data, mem);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        my_data->device_dispatch_table->UnmapMemory(device, mem);
    }
}

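// Check that each range in pMemRanges lies within the region currently mapped on its memory
// object.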
VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
                                const VkMappedMemoryRange *pMemRanges) {
    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
                    "(" PRINTF_SIZE_T_SPECIFIER ").",
                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
            }
            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
                 (pMemRanges[i].offset + pMemRanges[i].size))) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
                                                                 ") exceeds the Memory Object's upper-bound "
                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
            }
        }
    }
    return skipCall;
}

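// For noncoherent memory the layer shadows each mapping: the tracked allocation is twice the
// mapped size with the application data in the middle and guard bands (filled with
// NoncoherentMemoryFillValue, presumably by initializeAndTrackMemory) on either side. A guard
// byte that no longer holds the fill value indicates an out-of-bounds write; the real data is
// then copied back to the driver's pointer.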
VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
                                                  const VkMappedMemoryRange *pMemRanges) {
    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.pData) {
                VkDeviceSize size = mem_element->second.memRange.size;
                VkDeviceSize half_size = (size / 2);
                char *data = static_cast<char *>(mem_element->second.pData);
                for (VkDeviceSize j = 0; j < half_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
            }
        }
    }
    return skipCall;
}

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}
#endif

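// vkBindImageMemory: record the image/memory binding, check for aliasing against other buffers
// and images bound to the same memory object, and cache the image's memory requirements.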
VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    // Track objects tied to memory
    uint64_t image_handle = (uint64_t)(image);
    skipCall =
        set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
    {
        VkMemoryRequirements memRequirements;
        // Query through the dispatch table rather than re-entering this layer's own entry point
        // while globalLock is held
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    }
    print_mem_list(dev_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_FALSE == skipCall) {
        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
        VkMemoryRequirements memRequirements;
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->memObjMap[mem].image = image;
        dev_data->imageMap[image].mem = mem;
        dev_data->imageMap[image].memOffset = memoryOffset;
        dev_data->imageMap[image].memSize = memRequirements.size;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->eventMap[event].needsSignaled = false;
    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
    loader_platform_thread_unlock_mutex(&globalLock);
    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}

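// vkQueueBindSparse: track the memory objects bound by each sparse binding and validate the
// signaled/unsignaled state of the wait and signal semaphores before forwarding the call.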
VKAPI_ATTR VkResult VKAPI_CALL
vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skip_call = VK_FALSE;
#if MTMERGESOURCE
    //MTMTODO : Merge this code with the checks below
    loader_platform_thread_lock_mutex(&globalLock);

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        // Validate semaphore state
        for (uint32_t j = 0; j < bindInfo->waitSemaphoreCount; j++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[j];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
            }
        }
        for (uint32_t j = 0; j < bindInfo->signalSemaphoreCount; j++) {
            VkSemaphore sem = bindInfo->pSignalSemaphores[j];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
            }
        }
    }

    print_mem_list(dev_data, queue);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_FALSE == skip_call)
        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
#if MTMERGESOURCE
    // Validation failed and the call was skipped, so roll the wait-semaphore state back
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[i];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaled = 0;
        sNode->queue = VK_NULL_HANDLE;
        sNode->in_use.store(0);
        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].in_use.store(0);
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    if (VK_SUCCESS == result) {
        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    loader_platform_thread_lock_mutex(&globalLock);
    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
        if (swapchain_data->second->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->second->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
#if MTMERGESOURCE
                skipCall = clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
#endif
            }
        }
        delete swapchain_data->second;
        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}

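// vkGetSwapchainImagesKHR: record the returned swapchain images, initialize their layout
// tracking to VK_IMAGE_LAYOUT_UNDEFINED, and warn if repeated queries return mismatched data.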
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount)
            return result;
        loader_platform_thread_lock_mutex(&globalLock);
        const size_t count = *pCount;
        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
        if (!swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(%" PRIu64 ") returned mismatching image data",
                        (uint64_t)(swapchain));
            }
        }
        for (uint32_t i = 0; i < *pCount; ++i) {
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_node->createInfo.imageFormat;
            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            swapchain_node->images.push_back(pSwapchainImages[i]);
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            dev_data->imageLayoutMap[subpair] = image_layout_node;
            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
        }
        if (!swapchain_node->images.empty()) {
            for (auto image : swapchain_node->images) {
                // Add image object binding, then insert the new Mem Object and then bind it to created image
#if MTMERGESOURCE
                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                       &swapchain_node->createInfo);
#endif
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
            }
        }
        VkDeviceMemory mem;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
#if MTMERGESOURCE
    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

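// vkAcquireNextImageKHR: validate that the signal semaphore is not already signaled or pending
// a wait, associate the fence with the swapchain, and mark the semaphore signaled on return.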
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }
    loader_platform_thread_lock_mutex(&globalLock);
    // FIXME/TODO: Need to add tracking code for the "fence" parameter
    dev_data->semaphoreMap[semaphore].signaled = 1;
    loader_platform_thread_unlock_mutex(&globalLock);
    return result;
}

10621VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10622vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10623                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10624    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10625    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10626    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10627    if (VK_SUCCESS == res) {
10628        loader_platform_thread_lock_mutex(&globalLock);
10629        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
10630        loader_platform_thread_unlock_mutex(&globalLock);
10631    }
10632    return res;
10633}
10634
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                                                           VkDebugReportCallbackEXT msgCallback,
                                                                           const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
    loader_platform_thread_unlock_mutex(&globalLock);
}

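// Pure pass-through: forward application-generated debug messages to the next layer.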
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

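// vkGetDeviceProcAddr: resolve the device-level entry points this layer intercepts by string
// comparison, then fall back to WSI entry points (only if the extension was enabled at device
// creation) and finally to the next layer's GetDeviceProcAddr. A caller would typically fetch
// pointers like so (illustrative only; "device" is assumed to be a valid VkDevice):
//     PFN_vkQueuePresentKHR fpQueuePresentKHR =
//         (PFN_vkQueuePresentKHR)vkGetDeviceProcAddr(device, "vkQueuePresentKHR");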
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkDestroyDevice"))
        return (PFN_vkVoidFunction)vkDestroyDevice;
    if (!strcmp(funcName, "vkQueueSubmit"))
        return (PFN_vkVoidFunction)vkQueueSubmit;
    if (!strcmp(funcName, "vkWaitForFences"))
        return (PFN_vkVoidFunction)vkWaitForFences;
    if (!strcmp(funcName, "vkGetFenceStatus"))
        return (PFN_vkVoidFunction)vkGetFenceStatus;
    if (!strcmp(funcName, "vkQueueWaitIdle"))
        return (PFN_vkVoidFunction)vkQueueWaitIdle;
    if (!strcmp(funcName, "vkDeviceWaitIdle"))
        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
    if (!strcmp(funcName, "vkGetDeviceQueue"))
        return (PFN_vkVoidFunction)vkGetDeviceQueue;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkDestroyFence"))
        return (PFN_vkVoidFunction)vkDestroyFence;
    if (!strcmp(funcName, "vkResetFences"))
        return (PFN_vkVoidFunction)vkResetFences;
    if (!strcmp(funcName, "vkDestroySemaphore"))
        return (PFN_vkVoidFunction)vkDestroySemaphore;
    if (!strcmp(funcName, "vkDestroyEvent"))
        return (PFN_vkVoidFunction)vkDestroyEvent;
    if (!strcmp(funcName, "vkDestroyQueryPool"))
        return (PFN_vkVoidFunction)vkDestroyQueryPool;
    if (!strcmp(funcName, "vkDestroyBuffer"))
        return (PFN_vkVoidFunction)vkDestroyBuffer;
    if (!strcmp(funcName, "vkDestroyBufferView"))
        return (PFN_vkVoidFunction)vkDestroyBufferView;
    if (!strcmp(funcName, "vkDestroyImage"))
        return (PFN_vkVoidFunction)vkDestroyImage;
    if (!strcmp(funcName, "vkDestroyImageView"))
        return (PFN_vkVoidFunction)vkDestroyImageView;
    if (!strcmp(funcName, "vkDestroyShaderModule"))
        return (PFN_vkVoidFunction)vkDestroyShaderModule;
    if (!strcmp(funcName, "vkDestroyPipeline"))
        return (PFN_vkVoidFunction)vkDestroyPipeline;
    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
    if (!strcmp(funcName, "vkDestroySampler"))
        return (PFN_vkVoidFunction)vkDestroySampler;
    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
    if (!strcmp(funcName, "vkDestroyFramebuffer"))
        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
    if (!strcmp(funcName, "vkDestroyRenderPass"))
        return (PFN_vkVoidFunction)vkDestroyRenderPass;
    if (!strcmp(funcName, "vkCreateBuffer"))
        return (PFN_vkVoidFunction)vkCreateBuffer;
    if (!strcmp(funcName, "vkCreateBufferView"))
        return (PFN_vkVoidFunction)vkCreateBufferView;
    if (!strcmp(funcName, "vkCreateImage"))
        return (PFN_vkVoidFunction)vkCreateImage;
    if (!strcmp(funcName, "vkCreateImageView"))
        return (PFN_vkVoidFunction)vkCreateImageView;
    if (!strcmp(funcName, "vkCreateFence"))
        return (PFN_vkVoidFunction)vkCreateFence;
    if (!strcmp(funcName, "vkCreatePipelineCache"))
        return (PFN_vkVoidFunction)vkCreatePipelineCache;
    if (!strcmp(funcName, "vkDestroyPipelineCache"))
        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
    if (!strcmp(funcName, "vkGetPipelineCacheData"))
        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
    if (!strcmp(funcName, "vkMergePipelineCaches"))
        return (PFN_vkVoidFunction)vkMergePipelineCaches;
    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
    if (!strcmp(funcName, "vkCreateComputePipelines"))
        return (PFN_vkVoidFunction)vkCreateComputePipelines;
    if (!strcmp(funcName, "vkCreateSampler"))
        return (PFN_vkVoidFunction)vkCreateSampler;
    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
    if (!strcmp(funcName, "vkCreatePipelineLayout"))
        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
    if (!strcmp(funcName, "vkCreateDescriptorPool"))
        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
    if (!strcmp(funcName, "vkResetDescriptorPool"))
        return (PFN_vkVoidFunction)vkResetDescriptorPool;
    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
    if (!strcmp(funcName, "vkFreeDescriptorSets"))
        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
    if (!strcmp(funcName, "vkCreateCommandPool"))
        return (PFN_vkVoidFunction)vkCreateCommandPool;
    if (!strcmp(funcName, "vkDestroyCommandPool"))
        return (PFN_vkVoidFunction)vkDestroyCommandPool;
    if (!strcmp(funcName, "vkResetCommandPool"))
        return (PFN_vkVoidFunction)vkResetCommandPool;
    if (!strcmp(funcName, "vkCreateQueryPool"))
        return (PFN_vkVoidFunction)vkCreateQueryPool;
    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
    if (!strcmp(funcName, "vkFreeCommandBuffers"))
        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
    if (!strcmp(funcName, "vkBeginCommandBuffer"))
        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
    if (!strcmp(funcName, "vkEndCommandBuffer"))
        return (PFN_vkVoidFunction)vkEndCommandBuffer;
    if (!strcmp(funcName, "vkResetCommandBuffer"))
        return (PFN_vkVoidFunction)vkResetCommandBuffer;
    if (!strcmp(funcName, "vkCmdBindPipeline"))
        return (PFN_vkVoidFunction)vkCmdBindPipeline;
    if (!strcmp(funcName, "vkCmdSetViewport"))
        return (PFN_vkVoidFunction)vkCmdSetViewport;
    if (!strcmp(funcName, "vkCmdSetScissor"))
        return (PFN_vkVoidFunction)vkCmdSetScissor;
    if (!strcmp(funcName, "vkCmdSetLineWidth"))
        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
    if (!strcmp(funcName, "vkCmdSetDepthBias"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
    if (!strcmp(funcName, "vkCmdSetStencilReference"))
        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
    if (!strcmp(funcName, "vkCmdDraw"))
        return (PFN_vkVoidFunction)vkCmdDraw;
    if (!strcmp(funcName, "vkCmdDrawIndexed"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
    if (!strcmp(funcName, "vkCmdDrawIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
    if (!strcmp(funcName, "vkCmdDispatch"))
        return (PFN_vkVoidFunction)vkCmdDispatch;
    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
    if (!strcmp(funcName, "vkCmdCopyBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
    if (!strcmp(funcName, "vkCmdCopyImage"))
        return (PFN_vkVoidFunction)vkCmdCopyImage;
    if (!strcmp(funcName, "vkCmdBlitImage"))
        return (PFN_vkVoidFunction)vkCmdBlitImage;
    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;

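    // Nothing above matched; the remaining candidates (WSI entry points and pass-through)
    // require a valid device for per-device dispatch.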
    if (dev == NULL)
        return NULL;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

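// vkGetInstanceProcAddr: same pattern as the device version, but for instance-level entry
// points; unhandled names are offered to the debug report machinery before being passed down.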
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
#endif
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;

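    // Not a statically intercepted name: a NULL instance cannot be dispatched further; otherwise
    // give the debug report machinery a chance before passing the query down the chain.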
    if (instance == NULL)
        return NULL;

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    PFN_vkVoidFunction fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}