core_validation.cpp revision 080afef66a53dbdd085698b713697296458edfd9
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <string>
#include <iostream>
#include <algorithm>
#include <list>
#include <SPIRV/spirv.hpp>
#include <set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;
// These std names are also used unqualified throughout this file
using std::list;
using std::string;
using std::unique_ptr;
using std::vector;

#if MTMERGESOURCE
// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
#endif
// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    VkBool32 wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGESOURCE
// MTMERGESOURCE - stuff pulled directly from MT
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    unordered_map<VkDescriptorSet, MT_DESCRIPTOR_SET_INFO> descriptorSetMap;
    // Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
// MTMERGESOURCE - End of MT stuff
#endif
    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
// MTMERGESOURCE - added a couple of fields to constructor initializer
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGESOURCE
        currentFenceId(1),
#endif
        device_extensions(){};
};

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}
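
// For example (illustrative ordering, not taken from the original source): an app enabling
// both layers must list this layer first to avoid the warning above:
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};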

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
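
// Note on the encoding decoded by len()/opcode() above: the first word of every SPIR-V
// instruction packs the instruction's word count into its high 16 bits and the opcode into
// its low 16 bits. For example, 0x0004002b is a 4-word OpConstant (opcode 43 = 0x2b).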

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
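
// begin() starts at words.begin() + 5 because a SPIR-V module opens with a five-word
// header (magic, version, generator, bound, schema) before the first instruction.
// A typical traversal is then just a range-based for, e.g.:
//     for (auto insn : *module)
//         if (insn.opcode() == spv::OpEntryPoint) { /* inspect insn.word(n) ... */ }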

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkPhysicalDeviceMemoryProperties memProps;

static void clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF

static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    MT_OBJ_BINDING_INFO *retValue = NULL;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageBindingMap.find(handle);
        if (it != my_data->imageBindingMap.end())
            return &(*it).second;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferBindingMap.find(handle);
        if (it != my_data->bufferBindingMap.end())
            return &(*it).second;
        break;
    }
    default:
        break;
    }
    return retValue;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
static void delete_queue_info_list(layer_data *my_data) {
    // Process queue list, cleaning up each entry before deleting
    my_data->queueMap.clear();
}

// Delete CBInfo from container and clear mem references to CB
static void delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(my_data, cb);
    // Delete the CBInfo info
    my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
    my_data->commandBufferMap.erase(cb);
}

static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                    const VkDeviceMemory mem) {
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    default:
        break;
    }
}

static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                   const void *pCreateInfo) {
    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
        break;
    }
    // Swapchains are a special case: use my_data->imageBindingMap, but copy in the
    // SwapchainCreateInfo's usage flags and set the mem value to a unique key. These are used by
    // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
        pCI->valid = false;
        pCI->create_info.image.usage =
            const_cast<VkSwapchainCreateInfoKHR *>(static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo))->imageUsage;
        break;
    }
    default:
        break;
    }
}

// Record a fence for this submission, assigning it the next fenceId
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    *fenceId = my_data->currentFenceId++;

    // If a fence was provided, track it and validate that it was submitted in the UNSIGNALED state
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        // Validate that fence is in UNSIGNALED state
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}

// Remove a fenceInfo from our list of fences/fenceIds
static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    VkBool32 correct_usage = VK_FALSE;
    VkBool32 skipCall = VK_FALSE;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}
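
// Example of the two modes: with strict == VK_TRUE every desired bit must be present, so
// actual == VK_BUFFER_USAGE_TRANSFER_SRC_BIT fails against desired ==
// (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT); with strict ==
// VK_FALSE the same check passes, since at least one desired bit is set.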

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].refCount = 0;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}
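
// refCount on a DEVICE_MEM_INFO counts outstanding references from two sources: objects
// bound via set_mem_binding()/set_sparse_mem_binding() and command buffers recorded by
// update_cmd_buf_and_mem_references(). freeMemObjInfo() below reports any nonzero remainder.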

static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                         VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo && !pBindInfo->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo) {
            pBindInfo->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                                  const char *apiName) {
    VkBool32 skipCall = VK_FALSE;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            // Search for cmd buffer object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pMemInfo->pCommandBufferBindings.size() > 0) {
                for (list<VkCommandBuffer>::iterator it = pMemInfo->pCommandBufferBindings.begin();
                     it != pMemInfo->pCommandBufferBindings.end(); ++it) {
                    if ((*it) == cb) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                pMemInfo->pCommandBufferBindings.push_front(cb);
                pMemInfo->refCount++;
            }
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                // Search for memory object in cmd buffer's reference list
                VkBool32 found = VK_FALSE;
                if (pCBNode->pMemObjList.size() > 0) {
                    for (auto it = pCBNode->pMemObjList.begin(); it != pCBNode->pMemObjList.end(); ++it) {
                        if ((*it) == mem) {
                            found = VK_TRUE;
                            break;
                        }
                    }
                }
                // If not present, add to list
                if (found == VK_FALSE) {
                    pCBNode->pMemObjList.push_front(mem);
                }
            }
        }
    }
    return skipCall;
}

// Free bindings related to CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);

    if (pCBNode) {
        if (pCBNode->pMemObjList.size() > 0) {
            list<VkDeviceMemory> mem_obj_list = pCBNode->pMemObjList;
            for (list<VkDeviceMemory>::iterator it = mem_obj_list.begin(); it != mem_obj_list.end(); ++it) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, *it);
                if (pInfo) {
                    pInfo->pCommandBufferBindings.remove(cb);
                    pInfo->refCount--;
                }
            }
            pCBNode->pMemObjList.clear();
        }
        pCBNode->activeDescriptorSets.clear();
        pCBNode->validate_functions.clear();
    }
}

// Delete the entire CB list
static void delete_cmd_buf_info_list(layer_data *my_data) {
    for (auto &cb_node : my_data->commandBufferMap) {
        clear_cmd_buf_and_mem_references(my_data, cb_node.first);
    }
    my_data->commandBufferMap.clear();
}

// For given MemObjInfo, report Obj & CB bindings
static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    VkBool32 skipCall = VK_FALSE;
    size_t cmdBufRefCount = pMemObjInfo->pCommandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->pObjBindings.size();

    if ((pMemObjInfo->pCommandBufferBindings.size()) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->pCommandBufferBindings.size() > 0) {
        for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin();
             it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
            // TODO : CommandBuffer should be source Obj here
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pCommandBufferBindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->pObjBindings.size() > 0) {
        for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    it->handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pObjBindings.clear();
    }
    return skipCall;
}

static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
    VkBool32 skipCall = false;
    *complete = true;

    if (pCBNode) {
        if (pCBNode->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBNode->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            bool commandBufferComplete = false;
            assert(pInfo->object != VK_NULL_HANDLE);
            list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
            list<VkCommandBuffer>::iterator temp;
            while (pInfo->pCommandBufferBindings.size() > 0 && it != pInfo->pCommandBufferBindings.end()) {
                skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
                if (commandBufferComplete) {
                    temp = it;
                    ++temp;
                    clear_cmd_buf_and_mem_references(dev_data, *it);
                    it = temp;
                } else {
                    ++it;
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (0 != pInfo->refCount) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs 3 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Decrement refCount for MemObjInfo
// 3. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
    if (pObjBindInfo) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
        // TODO : Make sure this is a reasonable way to reset mem binding
        pObjBindInfo->mem = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list, decrement the
            // memObj's refcount
            // and set the objects memory binding pointer to NULL.
            VkBool32 clearSucceeded = VK_FALSE;
            for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
                if ((it->handle == handle) && (it->type == type)) {
                    pMemObjInfo->refCount--;
                    pMemObjInfo->pObjBindings.erase(it);
                    clearSucceeded = VK_TRUE;
                    break;
                }
            }
            if (VK_FALSE == clearSucceeded) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
//  device is required for error logging, need a dispatchable
//  object for that.
static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                // TODO : Need to track mem binding for obj and report conflict here
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pMemInfo->pObjBindings.push_front(oht);
                    pMemInfo->refCount++;
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                    pObjBindInfo->mem = mem;
                }
            }
        }
    }
    return skipCall;
}
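
// Note that non-sparse objects may have memory bound at most once: rebinding an object
// whose binding info still references a live mem object is reported as
// MEMTRACK_REBIND_OBJECT above rather than silently replacing the binding.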

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
        }
        // non-null case so should have real mem obj
        DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
        if (pInfo) {
            // Search for object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pInfo->pObjBindings.size() > 0) {
                for (auto it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                    if (((*it).handle == handle) && ((*it).type == type)) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                MT_OBJ_HANDLE_TYPE oht;
                oht.handle = handle;
                oht.type = type;
                pInfo->pObjBindings.push_front(oht);
                pInfo->refCount++;
            }
            // Need to set mem binding for this object (guard against the not-in-global-list
            // case reported above, where pObjBindInfo is null)
            if (pObjBindInfo) {
                pObjBindInfo->mem = mem;
            }
        }
    }
    return skipCall;
}

template <typename T>
void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
                              const char *objectStr) {
    for (auto const &element : objectName) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
    }
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    VkBool32 skipCall = VK_FALSE;
    *mem = VK_NULL_HANDLE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
    if (pObjBindInfo) {
        if (pObjBindInfo->mem) {
            *mem = pObjBindInfo->mem;
        } else {
            skipCall =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
        }
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: %u", pInfo->refCount);
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->pObjBindings.size());
        if (pInfo->pObjBindings.size() > 0) {
            for (list<MT_OBJ_HANDLE_TYPE>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, it->handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->pCommandBufferBindings.size());
        if (pInfo->pCommandBufferBindings.size() > 0) {
            for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
                 it != pInfo->pCommandBufferBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", (*it));
            }
        }
    }
}

static void printCBList(layer_data *my_data, void *dispObj) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->pMemObjList.size() <= 0)
            continue;
        for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)(*it));
        }
    }
}

#endif

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}
1117
1118// SPIRV utility functions
1119static void build_def_index(shader_module *module) {
1120    for (auto insn : *module) {
1121        switch (insn.opcode()) {
1122        /* Types */
1123        case spv::OpTypeVoid:
1124        case spv::OpTypeBool:
1125        case spv::OpTypeInt:
1126        case spv::OpTypeFloat:
1127        case spv::OpTypeVector:
1128        case spv::OpTypeMatrix:
1129        case spv::OpTypeImage:
1130        case spv::OpTypeSampler:
1131        case spv::OpTypeSampledImage:
1132        case spv::OpTypeArray:
1133        case spv::OpTypeRuntimeArray:
1134        case spv::OpTypeStruct:
1135        case spv::OpTypeOpaque:
1136        case spv::OpTypePointer:
1137        case spv::OpTypeFunction:
1138        case spv::OpTypeEvent:
1139        case spv::OpTypeDeviceEvent:
1140        case spv::OpTypeReserveId:
1141        case spv::OpTypeQueue:
1142        case spv::OpTypePipe:
1143            module->def_index[insn.word(1)] = insn.offset();
1144            break;
1145
1146        /* Fixed constants */
1147        case spv::OpConstantTrue:
1148        case spv::OpConstantFalse:
1149        case spv::OpConstant:
1150        case spv::OpConstantComposite:
1151        case spv::OpConstantSampler:
1152        case spv::OpConstantNull:
1153            module->def_index[insn.word(2)] = insn.offset();
1154            break;
1155
1156        /* Specialization constants */
1157        case spv::OpSpecConstantTrue:
1158        case spv::OpSpecConstantFalse:
1159        case spv::OpSpecConstant:
1160        case spv::OpSpecConstantComposite:
1161        case spv::OpSpecConstantOp:
1162            module->def_index[insn.word(2)] = insn.offset();
1163            break;
1164
1165        /* Variables */
1166        case spv::OpVariable:
1167            module->def_index[insn.word(2)] = insn.offset();
1168            break;
1169
1170        /* Functions */
1171        case spv::OpFunction:
1172            module->def_index[insn.word(2)] = insn.offset();
1173            break;
1174
1175        default:
1176            /* We don't care about any other defs for now. */
1177            break;
1178        }
1179    }
1180}
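/* Illustrative only: once build_def_index has run, get_def gives O(1) access from a
 * result id to its defining instruction. A hypothetical caller might do:
 *
 *     build_def_index(module);
 *     auto insn = module->get_def(some_id);   // some_id: any SPIR-V result id
 *     if (insn != module->end() && insn.opcode() == spv::OpTypeFloat) { ... }
 */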
1181
1182static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1183    for (auto insn : *src) {
1184        if (insn.opcode() == spv::OpEntryPoint) {
1185            auto entrypointName = (char const *)&insn.word(3);
1186            auto entrypointStageBits = 1u << insn.word(1);
1187
1188            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1189                return insn;
1190            }
1191        }
1192    }
1193
1194    return src->end();
1195}
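/* Note: the `1u << insn.word(1)` mapping above relies on the SPIR-V ExecutionModel
 * values lining up with the VkShaderStageFlagBits bit positions for the graphics
 * and compute stages (Vertex=0 -> 0x1, ..., Fragment=4 -> 0x10, GLCompute=5 -> 0x20).
 * Illustrative usage, not part of this layer:
 *
 *     auto ep = find_entrypoint(module, "main", VK_SHADER_STAGE_FRAGMENT_BIT);
 *     if (ep == module->end()) { ... no matching entrypoint in this module ... }
 */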
1196
1197bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
1198    uint32_t const *words = (uint32_t const *)pCreateInfo->pCode;
1199    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
1200
1201    /* Just validate that the header makes sense. */
1202    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
1203}
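/* For reference, the five header words checked above are, per the SPIR-V spec:
 *     words[0] = spv::MagicNumber (0x07230203)
 *     words[1] = version
 *     words[2] = generator magic
 *     words[3] = id bound
 *     words[4] = reserved (must be 0)
 * A minimal sketch of a passing header (illustrative, not a usable shader):
 *     uint32_t code[] = {spv::MagicNumber, spv::Version, 0, 1, 0};
 *     VkShaderModuleCreateInfo ci = {};
 *     ci.codeSize = sizeof(code);
 *     ci.pCode = code;
 *     assert(shader_is_spirv(&ci));  // header-only check passes
 */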
1204
1205static char const *storage_class_name(unsigned sc) {
1206    switch (sc) {
1207    case spv::StorageClassInput:
1208        return "input";
1209    case spv::StorageClassOutput:
1210        return "output";
1211    case spv::StorageClassUniformConstant:
1212        return "const uniform";
1213    case spv::StorageClassUniform:
1214        return "uniform";
1215    case spv::StorageClassWorkgroup:
1216        return "workgroup local";
1217    case spv::StorageClassCrossWorkgroup:
1218        return "workgroup global";
1219    case spv::StorageClassPrivate:
1220        return "private global";
1221    case spv::StorageClassFunction:
1222        return "function";
1223    case spv::StorageClassGeneric:
1224        return "generic";
1225    case spv::StorageClassAtomicCounter:
1226        return "atomic counter";
1227    case spv::StorageClassImage:
1228        return "image";
1229    case spv::StorageClassPushConstant:
1230        return "push constant";
1231    default:
1232        return "unknown";
1233    }
1234}
1235
1236/* get the value of an integral constant */
1237unsigned get_constant_value(shader_module const *src, unsigned id) {
1238    auto value = src->get_def(id);
1239    assert(value != src->end());
1240
1241    if (value.opcode() != spv::OpConstant) {
1242        /* TODO: Either ensure that the specialization transform is already performed on a module we're
1243            considering here, OR -- specialize on the fly now.
1244            */
1245        return 1;
1246    }
1247
1248    return value.word(3);
1249}
1250
1251
1252static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1253    auto insn = src->get_def(type);
1254    assert(insn != src->end());
1255
1256    switch (insn.opcode()) {
1257    case spv::OpTypeBool:
1258        ss << "bool";
1259        break;
1260    case spv::OpTypeInt:
1261        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1262        break;
1263    case spv::OpTypeFloat:
1264        ss << "float" << insn.word(2);
1265        break;
1266    case spv::OpTypeVector:
1267        ss << "vec" << insn.word(3) << " of ";
1268        describe_type_inner(ss, src, insn.word(2));
1269        break;
1270    case spv::OpTypeMatrix:
1271        ss << "mat" << insn.word(3) << " of ";
1272        describe_type_inner(ss, src, insn.word(2));
1273        break;
1274    case spv::OpTypeArray:
1275        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1276        describe_type_inner(ss, src, insn.word(2));
1277        break;
1278    case spv::OpTypePointer:
1279        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1280        describe_type_inner(ss, src, insn.word(3));
1281        break;
1282    case spv::OpTypeStruct: {
1283        ss << "struct of (";
1284        for (unsigned i = 2; i < insn.len(); i++) {
1285            describe_type_inner(ss, src, insn.word(i));
1286            if (i != insn.len() - 1) {
1287                ss << ", ";
1288            }
1289        }
1290        /* close the paren outside the loop so a zero-member struct still balances */
1291        ss << ")";
1292        break;
1293    }
1294    case spv::OpTypeSampler:
1295        ss << "sampler";
1296        break;
1297    case spv::OpTypeSampledImage:
1298        ss << "sampler+";
1299        describe_type_inner(ss, src, insn.word(2));
1300        break;
1301    case spv::OpTypeImage:
1302        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1303        break;
1304    default:
1305        ss << "oddtype";
1306        break;
1307    }
1308}
1309
1310
1311static std::string describe_type(shader_module const *src, unsigned type) {
1312    std::ostringstream ss;
1313    describe_type_inner(ss, src, type);
1314    return ss.str();
1315}
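/* Example (illustrative): for a GLSL interface variable declared as
 *     layout(location = 0) in vec4 color[3];
 * the variable's pointer type would be rendered as
 *     "ptr to input arr[3] of vec4 of float32"
 * which is the form the interface-mismatch messages below embed. */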
1316
1317
1318static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed) {
1319    /* walk two type trees together, and complain about differences */
1320    auto a_insn = a->get_def(a_type);
1321    auto b_insn = b->get_def(b_type);
1322    assert(a_insn != a->end());
1323    assert(b_insn != b->end());
1324
1325    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1326        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1327        return types_match(a, b, a_type, b_insn.word(2), false);
1328    }
1329
1330    if (a_insn.opcode() != b_insn.opcode()) {
1331        return false;
1332    }
1333
1334    switch (a_insn.opcode()) {
1335    /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
1336    case spv::OpTypeBool:
1337        return !b_arrayed;
1338    case spv::OpTypeInt:
1339        /* match on width, signedness */
1340        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
1341    case spv::OpTypeFloat:
1342        /* match on width */
1343        return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
1344    case spv::OpTypeVector:
1345    case spv::OpTypeMatrix:
1346        /* match on element type, count. these all have the same layout. we don't get here if
1347         * b_arrayed -- that is handled above. */
1348        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) && a_insn.word(3) == b_insn.word(3);
1349    case spv::OpTypeArray:
1350        /* match on element type, count. these all have the same layout. we don't get here if
1351         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1352         * not a literal within OpTypeArray */
1353        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
1354               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1355    case spv::OpTypeStruct:
1356        /* match on all element types */
1357        {
1358            if (b_arrayed) {
1359                /* for the purposes of matching different levels of arrayness, structs are leaves. */
1360                return false;
1361            }
1362
1363            if (a_insn.len() != b_insn.len()) {
1364                return false; /* structs cannot match if member counts differ */
1365            }
1366
1367            for (unsigned i = 2; i < a_insn.len(); i++) {
1368                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
1369                    return false;
1370                }
1371            }
1372
1373            return true;
1374        }
1375    case spv::OpTypePointer:
1376        /* match on pointee type. storage class is expected to differ */
1377        return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);
1378
1379    default:
1380        /* remaining types are CLisms, or may not appear in the interfaces we
1381         * are interested in. Just claim no match.
1382         */
1383        return false;
1384    }
1385}
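/* Illustrative: a vertex shader output `vec4 v` matches a geometry shader input
 * `vec4 v[]` when compared with b_arrayed=true -- the extra OpTypeArray level on
 * the consumer side is peeled off first, then the leaf vec4/float types compare
 * equal. With b_arrayed=false the same pair fails on mismatched opcodes. */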
1386
1387static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1388    auto it = map.find(id);
1389    if (it == map.end())
1390        return def;
1391    else
1392        return it->second;
1393}
1394
1395static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1396    auto insn = src->get_def(type);
1397    assert(insn != src->end());
1398
1399    switch (insn.opcode()) {
1400    case spv::OpTypePointer:
1401        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1402         * we're never actually passing pointers around. */
1403        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1404    case spv::OpTypeArray:
1405        if (strip_array_level) {
1406            return get_locations_consumed_by_type(src, insn.word(2), false);
1407        } else {
1408            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1409        }
1410    case spv::OpTypeMatrix:
1411        /* num locations is the dimension * element size */
1412        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1413    default:
1414        /* everything else is just 1. */
1415        return 1;
1416
1417        /* TODO: extend to handle 64bit scalar types, whose vectors may need
1418         * multiple locations. */
1419    }
1420}
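/* Examples: a mat4 consumes 4 locations (one per column vector); float[5]
 * consumes 5; and a geometry-shader input `vec3 n[]` queried with
 * strip_array_level=true counts as 1, since the outer per-vertex array does not
 * contribute to the location footprint. */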
1421
1422typedef std::pair<unsigned, unsigned> location_t;
1423typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1424
1425struct interface_var {
1426    uint32_t id;
1427    uint32_t type_id;
1428    uint32_t offset;
1429    /* TODO: collect the name, too? Isn't required to be present. */
1430};
1431
1432static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1433    while (true) {
1434
1435        if (def.opcode() == spv::OpTypePointer) {
1436            def = src->get_def(def.word(3));
1437        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1438            def = src->get_def(def.word(2));
1439            is_array_of_verts = false;
1440        } else if (def.opcode() == spv::OpTypeStruct) {
1441            return def;
1442        } else {
1443            return src->end();
1444        }
1445    }
1446}
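/* E.g. a tessellation control shader input block is reached through
 * OpTypePointer -> OpTypeArray -> OpTypeStruct: with is_array_of_verts=true the
 * per-vertex array level is peeled off and the OpTypeStruct instruction is
 * returned; anything that never bottoms out at a struct yields src->end(). */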
1447
1448static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
1449                                            std::map<location_t, interface_var> &out,
1450                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1451                                            uint32_t id, uint32_t type_id) {
1452    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1453    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
1454    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1455        /* this isn't an interface block. */
1456        return;
1457    }
1458
1459    std::unordered_map<unsigned, unsigned> member_components;
1460
1461    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1462    for (auto insn : *src) {
1463        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1464            unsigned member_index = insn.word(2);
1465
1466            if (insn.word(3) == spv::DecorationComponent) {
1467                unsigned component = insn.word(4);
1468                member_components[member_index] = component;
1469            }
1470        }
1471    }
1472
1473    /* Second pass -- produce the output, from Location decorations */
1474    for (auto insn : *src) {
1475        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1476            unsigned member_index = insn.word(2);
1477            unsigned member_type_id = type.word(2 + member_index);
1478
1479            if (insn.word(3) == spv::DecorationLocation) {
1480                unsigned location = insn.word(4);
1481                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1482                auto component_it = member_components.find(member_index);
1483                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1484
1485                for (unsigned int offset = 0; offset < num_locations; offset++) {
1486                    interface_var v;
1487                    v.id = id;
1488                    /* TODO: member index in interface_var too? */
1489                    v.type_id = member_type_id;
1490                    v.offset = offset;
1491                    out[std::make_pair(location + offset, component)] = v;
1492                }
1493            }
1494        }
1495    }
1496}
1497
1498static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
1499                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1500                                          bool is_array_of_verts) {
1501    std::unordered_map<unsigned, unsigned> var_locations;
1502    std::unordered_map<unsigned, unsigned> var_builtins;
1503    std::unordered_map<unsigned, unsigned> var_components;
1504    std::unordered_map<unsigned, unsigned> blocks;
1505
1506    for (auto insn : *src) {
1507
1508        /* We consider two interface models: SSO rendezvous-by-location, and
1509         * builtins. Complain about anything that fits neither model.
1510         */
1511        if (insn.opcode() == spv::OpDecorate) {
1512            if (insn.word(2) == spv::DecorationLocation) {
1513                var_locations[insn.word(1)] = insn.word(3);
1514            }
1515
1516            if (insn.word(2) == spv::DecorationBuiltIn) {
1517                var_builtins[insn.word(1)] = insn.word(3);
1518            }
1519
1520            if (insn.word(2) == spv::DecorationComponent) {
1521                var_components[insn.word(1)] = insn.word(3);
1522            }
1523
1524            if (insn.word(2) == spv::DecorationBlock) {
1525                blocks[insn.word(1)] = 1;
1526            }
1527        }
1528    }
1529
1530    /* TODO: handle grouped decorations */
1531    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1532     * have the same location, and we DON'T want to clobber. */
1533
1534    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1535       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1536       the word to determine which word contains the terminator. */
1537    auto word = 3;
1538    while (entrypoint.word(word) & 0xff000000u) {
1539        ++word;
1540    }
1541    ++word;
1542
1543    for (; word < entrypoint.len(); word++) {
1544        auto insn = src->get_def(entrypoint.word(word));
1545        assert(insn != src->end());
1546        assert(insn.opcode() == spv::OpVariable);
1547
1548        if (insn.word(3) == sinterface) {
1549            unsigned id = insn.word(2);
1550            unsigned type = insn.word(1);
1551
1552            int location = value_or_default(var_locations, id, -1);
1553            int builtin = value_or_default(var_builtins, id, -1);
1554            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1555
1556            /* All variables and interface block members in the Input or Output storage classes
1557             * must be decorated with either a builtin or an explicit location.
1558             *
1559             * TODO: integrate the interface block support here. For now, don't complain --
1560             * a valid SPIRV module will only hit this path for the interface block case, as the
1561             * individual members of the type are decorated, rather than variable declarations.
1562             */
1563
1564            if (location != -1) {
1565                /* A user-defined interface variable, with a location. Where a variable
1566                 * occupies multiple locations, emit one result for each. */
1567                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts);
1568                for (unsigned int offset = 0; offset < num_locations; offset++) {
1569                    interface_var v;
1570                    v.id = id;
1571                    v.type_id = type;
1572                    v.offset = offset;
1573                    out[std::make_pair(location + offset, component)] = v;
1574                }
1575            } else if (builtin == -1) {
1576                /* An interface block instance */
1577                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type);
1578            }
1579        }
1580    }
1581}
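/* Illustrative: a fragment shader output declared as
 *     layout(location = 1) out vec4 c;
 * shows up as `OpDecorate %c Location 1` and lands in `out` keyed by
 * (location=1, component=0), with type_id naming the variable's pointer type. */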
1582
1583static void collect_interface_by_descriptor_slot(layer_data *my_data, shader_module const *src,
1584                                                 std::unordered_set<uint32_t> const &accessible_ids,
1585                                                 std::map<descriptor_slot_t, interface_var> &out) {
1586
1587    std::unordered_map<unsigned, unsigned> var_sets;
1588    std::unordered_map<unsigned, unsigned> var_bindings;
1589
1590    for (auto insn : *src) {
1591        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1592         * DecorationDescriptorSet and DecorationBinding.
1593         */
1594        if (insn.opcode() == spv::OpDecorate) {
1595            if (insn.word(2) == spv::DecorationDescriptorSet) {
1596                var_sets[insn.word(1)] = insn.word(3);
1597            }
1598
1599            if (insn.word(2) == spv::DecorationBinding) {
1600                var_bindings[insn.word(1)] = insn.word(3);
1601            }
1602        }
1603    }
1604
1605    for (auto id : accessible_ids) {
1606        auto insn = src->get_def(id);
1607        assert(insn != src->end());
1608
1609        if (insn.opcode() == spv::OpVariable &&
1610            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1611            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1612            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1613
1614            auto existing_it = out.find(std::make_pair(set, binding));
1615            if (existing_it != out.end()) {
1616                /* conflict within spv image */
1617                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1618                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1619                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1620                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1621                        existing_it->first.second);
1622            }
1623
1624            interface_var v;
1625            v.id = insn.word(2);
1626            v.type_id = insn.word(1);
1627            out[std::make_pair(set, binding)] = v;
1628        }
1629    }
1630}
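/* Illustrative: `layout(set = 0, binding = 2) uniform sampler2D tex;` produces
 * OpDecorate DescriptorSet 0 and OpDecorate Binding 2 on the variable, so it is
 * recorded under the descriptor_slot_t key (0, 2) for layout matching later. */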
1631
1632static bool validate_interface_between_stages(layer_data *my_data, shader_module const *producer,
1633                                              spirv_inst_iter producer_entrypoint, char const *producer_name,
1634                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1635                                              char const *consumer_name, bool consumer_arrayed_input) {
1636    std::map<location_t, interface_var> outputs;
1637    std::map<location_t, interface_var> inputs;
1638
1639    bool pass = true;
1640
1641    collect_interface_by_location(my_data, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
1642    collect_interface_by_location(my_data, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
1643                                  consumer_arrayed_input);
1644
1645    auto a_it = outputs.begin();
1646    auto b_it = inputs.begin();
1647
1648    /* maps sorted by key (location); walk them together to find mismatches */
1649    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1650        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1651        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1652        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1653        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1654
1655        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1656            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1657                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1658                        "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
1659                        a_first.second, consumer_name)) {
1660                pass = false;
1661            }
1662            a_it++;
1663        } else if (a_at_end || a_first > b_first) {
1664            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1665                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1666                        "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
1667                        producer_name)) {
1668                pass = false;
1669            }
1670            b_it++;
1671        } else {
1672            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
1673                /* OK! */
1674            } else {
1675                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1676                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1677                            a_first.first, a_first.second,
1678                            describe_type(producer, a_it->second.type_id).c_str(),
1679                            describe_type(consumer, b_it->second.type_id).c_str())) {
1680                    pass = false;
1681                }
1682            }
1683            a_it++;
1684            b_it++;
1685        }
1686    }
1687
1688    return pass;
1689}
1690
1691enum FORMAT_TYPE {
1692    FORMAT_TYPE_UNDEFINED,
1693    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1694    FORMAT_TYPE_SINT,
1695    FORMAT_TYPE_UINT,
1696};
1697
1698static unsigned get_format_type(VkFormat fmt) {
1699    switch (fmt) {
1700    case VK_FORMAT_UNDEFINED:
1701        return FORMAT_TYPE_UNDEFINED;
1702    case VK_FORMAT_R8_SINT:
1703    case VK_FORMAT_R8G8_SINT:
1704    case VK_FORMAT_R8G8B8_SINT:
1705    case VK_FORMAT_R8G8B8A8_SINT:
1706    case VK_FORMAT_R16_SINT:
1707    case VK_FORMAT_R16G16_SINT:
1708    case VK_FORMAT_R16G16B16_SINT:
1709    case VK_FORMAT_R16G16B16A16_SINT:
1710    case VK_FORMAT_R32_SINT:
1711    case VK_FORMAT_R32G32_SINT:
1712    case VK_FORMAT_R32G32B32_SINT:
1713    case VK_FORMAT_R32G32B32A32_SINT:
1714    case VK_FORMAT_B8G8R8_SINT:
1715    case VK_FORMAT_B8G8R8A8_SINT:
1716    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1717    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1718        return FORMAT_TYPE_SINT;
1719    case VK_FORMAT_R8_UINT:
1720    case VK_FORMAT_R8G8_UINT:
1721    case VK_FORMAT_R8G8B8_UINT:
1722    case VK_FORMAT_R8G8B8A8_UINT:
1723    case VK_FORMAT_R16_UINT:
1724    case VK_FORMAT_R16G16_UINT:
1725    case VK_FORMAT_R16G16B16_UINT:
1726    case VK_FORMAT_R16G16B16A16_UINT:
1727    case VK_FORMAT_R32_UINT:
1728    case VK_FORMAT_R32G32_UINT:
1729    case VK_FORMAT_R32G32B32_UINT:
1730    case VK_FORMAT_R32G32B32A32_UINT:
1731    case VK_FORMAT_B8G8R8_UINT:
1732    case VK_FORMAT_B8G8R8A8_UINT:
1733    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1734    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1735        return FORMAT_TYPE_UINT;
1736    default:
1737        return FORMAT_TYPE_FLOAT;
1738    }
1739}
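/* E.g. VK_FORMAT_R8G8B8A8_UNORM falls through to FORMAT_TYPE_FLOAT and so pairs
 * with a shader-side `vec4`, while VK_FORMAT_R32G32_UINT pairs with `uvec2`. */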
1740
1741/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1742 * for comparison to a VkFormat's characterization above. */
1743static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1744    auto insn = src->get_def(type);
1745    assert(insn != src->end());
1746
1747    switch (insn.opcode()) {
1748    case spv::OpTypeInt:
1749        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1750    case spv::OpTypeFloat:
1751        return FORMAT_TYPE_FLOAT;
1752    case spv::OpTypeVector:
1753        return get_fundamental_type(src, insn.word(2));
1754    case spv::OpTypeMatrix:
1755        return get_fundamental_type(src, insn.word(2));
1756    case spv::OpTypeArray:
1757        return get_fundamental_type(src, insn.word(2));
1758    case spv::OpTypePointer:
1759        return get_fundamental_type(src, insn.word(3));
1760    default:
1761        return FORMAT_TYPE_UNDEFINED;
1762    }
1763}
1764
1765static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1766    uint32_t bit_pos = u_ffs(stage);
1767    return bit_pos - 1;
1768}
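/* E.g. VK_SHADER_STAGE_FRAGMENT_BIT (0x10): u_ffs returns 5, so the stage id is
 * 4, which indexes the fragment entry of shader_stage_attribs[] further below. */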
1769
1770static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1771    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1772     * each binding should be specified only once.
1773     */
1774    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1775    bool pass = true;
1776
1777    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1778        auto desc = &vi->pVertexBindingDescriptions[i];
1779        auto &binding = bindings[desc->binding];
1780        if (binding) {
1781            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1782                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1783                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1784                pass = false;
1785            }
1786        } else {
1787            binding = desc;
1788        }
1789    }
1790
1791    return pass;
1792}
1793
1794static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi,
1795                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1796    std::map<location_t, interface_var> inputs;
1797    bool pass = true;
1798
1799    collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false);
1800
1801    /* Build index by location */
1802    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1803    if (vi) {
1804        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
1805            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
1806    }
1807
1808    auto it_a = attribs.begin();
1809    auto it_b = inputs.begin();
1810
1811    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1812        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1813        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1814        auto a_first = a_at_end ? 0 : it_a->first;
1815        auto b_first = b_at_end ? 0 : it_b->first.first;
1816        if (!a_at_end && (b_at_end || a_first < b_first)) {
1817            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1818                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1819                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1820                pass = false;
1821            }
1822            it_a++;
1823        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1824            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1825                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1826                        b_first)) {
1827                pass = false;
1828            }
1829            it_b++;
1830        } else {
1831            unsigned attrib_type = get_format_type(it_a->second->format);
1832            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1833
1834            /* type checking */
1835            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1836                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1837                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1838                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1839                            string_VkFormat(it_a->second->format), a_first,
1840                            describe_type(vs, it_b->second.type_id).c_str())) {
1841                    pass = false;
1842                }
1843            }
1844
1845            /* OK! */
1846            it_a++;
1847            it_b++;
1848        }
1849    }
1850
1851    return pass;
1852}
1853
1854static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs,
1855                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1856    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
1857    std::map<location_t, interface_var> outputs;
1858    bool pass = true;
1859
1860    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1861
1862    collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false);
1863
1864    auto it = outputs.begin();
1865    uint32_t attachment = 0;
1866
1867    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
1868     * are currently dense, but the parallel with matching between shader stages is nice.
1869     */
1870
1871    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
1872        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
1873            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1874                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1875                        "FS writes to output location %d with no matching attachment", it->first.first)) {
1876                pass = false;
1877            }
1878            it++;
1879        } else if (it == outputs.end() || it->first.first > attachment) {
1880            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1881                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
1882                pass = false;
1883            }
1884            attachment++;
1885        } else {
1886            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
1887            unsigned att_type = get_format_type(color_formats[attachment]);
1888
1889            /* type checking */
1890            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1891                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1892                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1893                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
1894                            string_VkFormat(color_formats[attachment]),
1895                            describe_type(fs, it->second.type_id).c_str())) {
1896                    pass = false;
1897                }
1898            }
1899
1900            /* OK! */
1901            it++;
1902            attachment++;
1903        }
1904    }
1905
1906    return pass;
1907}
1908
1909/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1910 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1911 * for example.
1912 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1913 *  - NOT the shader input/output interfaces.
1914 *
1915 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1916 * converting parts of this to be generated from the machine-readable spec instead.
1917 */
1918static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1919    std::unordered_set<uint32_t> worklist;
1920    worklist.insert(entrypoint.word(2));
1921
1922    while (!worklist.empty()) {
1923        auto id_iter = worklist.begin();
1924        auto id = *id_iter;
1925        worklist.erase(id_iter);
1926
1927        auto insn = src->get_def(id);
1928        if (insn == src->end()) {
1929            /* id is something we didn't collect in build_def_index. That's OK -- we'll stumble
1930             * across all kinds of things here that we may not care about. */
1931            continue;
1932        }
1933
1934        /* try to add to the output set */
1935        if (!ids.insert(id).second) {
1936            continue; /* if we already saw this id, we don't want to walk it again. */
1937        }
1938
1939        switch (insn.opcode()) {
1940        case spv::OpFunction:
1941            /* scan whole body of the function, enlisting anything interesting */
1942            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1943                switch (insn.opcode()) {
1944                case spv::OpLoad:
1945                case spv::OpAtomicLoad:
1946                case spv::OpAtomicExchange:
1947                case spv::OpAtomicCompareExchange:
1948                case spv::OpAtomicCompareExchangeWeak:
1949                case spv::OpAtomicIIncrement:
1950                case spv::OpAtomicIDecrement:
1951                case spv::OpAtomicIAdd:
1952                case spv::OpAtomicISub:
1953                case spv::OpAtomicSMin:
1954                case spv::OpAtomicUMin:
1955                case spv::OpAtomicSMax:
1956                case spv::OpAtomicUMax:
1957                case spv::OpAtomicAnd:
1958                case spv::OpAtomicOr:
1959                case spv::OpAtomicXor:
1960                    worklist.insert(insn.word(3)); /* ptr */
1961                    break;
1962                case spv::OpStore:
1963                case spv::OpAtomicStore:
1964                    worklist.insert(insn.word(1)); /* ptr */
1965                    break;
1966                case spv::OpAccessChain:
1967                case spv::OpInBoundsAccessChain:
1968                    worklist.insert(insn.word(3)); /* base ptr */
1969                    break;
1970                case spv::OpSampledImage:
1971                case spv::OpImageSampleImplicitLod:
1972                case spv::OpImageSampleExplicitLod:
1973                case spv::OpImageSampleDrefImplicitLod:
1974                case spv::OpImageSampleDrefExplicitLod:
1975                case spv::OpImageSampleProjImplicitLod:
1976                case spv::OpImageSampleProjExplicitLod:
1977                case spv::OpImageSampleProjDrefImplicitLod:
1978                case spv::OpImageSampleProjDrefExplicitLod:
1979                case spv::OpImageFetch:
1980                case spv::OpImageGather:
1981                case spv::OpImageDrefGather:
1982                case spv::OpImageRead:
1983                case spv::OpImage:
1984                case spv::OpImageQueryFormat:
1985                case spv::OpImageQueryOrder:
1986                case spv::OpImageQuerySizeLod:
1987                case spv::OpImageQuerySize:
1988                case spv::OpImageQueryLod:
1989                case spv::OpImageQueryLevels:
1990                case spv::OpImageQuerySamples:
1991                case spv::OpImageSparseSampleImplicitLod:
1992                case spv::OpImageSparseSampleExplicitLod:
1993                case spv::OpImageSparseSampleDrefImplicitLod:
1994                case spv::OpImageSparseSampleDrefExplicitLod:
1995                case spv::OpImageSparseSampleProjImplicitLod:
1996                case spv::OpImageSparseSampleProjExplicitLod:
1997                case spv::OpImageSparseSampleProjDrefImplicitLod:
1998                case spv::OpImageSparseSampleProjDrefExplicitLod:
1999                case spv::OpImageSparseFetch:
2000                case spv::OpImageSparseGather:
2001                case spv::OpImageSparseDrefGather:
2002                case spv::OpImageTexelPointer:
2003                    worklist.insert(insn.word(3)); /* image or sampled image */
2004                    break;
2005                case spv::OpImageWrite:
2006                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
2007                    break;
2008                case spv::OpFunctionCall:
2009                    for (auto i = 3; i < insn.len(); i++) {
2010                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2011                    }
2012                    break;
2013
2014                case spv::OpExtInst:
2015                    for (auto i = 5; i < insn.len(); i++) {
2016                        worklist.insert(insn.word(i)); /* operands to ext inst */
2017                    }
2018                    break;
2019                }
2020            }
2021            break;
2022        }
2023    }
2024}
2025
2026struct shader_stage_attributes {
2027    char const *const name;
2028    bool arrayed_input;
2029};
2030
2031static shader_stage_attributes shader_stage_attribs[] = {
2032    {"vertex shader", false},
2033    {"tessellation control shader", true},
2034    {"tessellation evaluation shader", false},
2035    {"geometry shader", true},
2036    {"fragment shader", false},
2037};
2038
2039static bool validate_push_constant_block_against_pipeline(layer_data *my_data,
2040                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
2041                                                          shader_module const *src, spirv_inst_iter type,
2042                                                          VkShaderStageFlagBits stage) {
2043    bool pass = true;
2044
2045    /* strip off ptrs etc */
2046    type = get_struct_type(src, type, false);
2047    assert(type != src->end());
2048
2049    /* validate directly off the offsets. this isn't quite correct for arrays
2050     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2051     * sizes */
2052    for (auto insn : *src) {
2053        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2054
2055            if (insn.word(3) == spv::DecorationOffset) {
2056                unsigned offset = insn.word(4);
2057                auto size = 4; /* bytes; TODO: calculate this based on the type */
2058
2059                bool found_range = false;
2060                for (auto const &range : *pushConstantRanges) {
2061                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2062                        found_range = true;
2063
2064                        if ((range.stageFlags & stage) == 0) {
2065                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2066                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2067                                        "Push constant range covering variable starting at "
2068                                        "offset %u not accessible from stage %s",
2069                                        offset, string_VkShaderStageFlagBits(stage))) {
2070                                pass = false;
2071                            }
2072                        }
2073
2074                        break;
2075                    }
2076                }
2077
2078                if (!found_range) {
2079                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2080                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2081                                "Push constant range covering variable starting at "
2082                                "offset %u not declared in layout",
2083                                offset)) {
2084                        pass = false;
2085                    }
2086                }
2087            }
2088        }
2089    }
2090
2091    return pass;
2092}
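/* Illustrative: with a pipeline layout declaring the single range
 *     { VK_SHADER_STAGE_VERTEX_BIT, offset = 0, size = 64 }
 * a vertex-stage push-constant member decorated `Offset 68` is reported as
 * SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE (no range covers bytes 68..71),
 * while a fragment-stage member at offset 16 is covered but reported as
 * SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, since the range
 * lacks the fragment stage bit. */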
2093
2094static bool validate_push_constant_usage(layer_data *my_data,
2095                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
2096                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2097    bool pass = true;
2098
2099    for (auto id : accessible_ids) {
2100        auto def_insn = src->get_def(id);
2101        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2102            pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src,
2103                                                                 src->get_def(def_insn.word(1)), stage);
2104        }
2105    }
2106
2107    return pass;
2108}
2109
2110// For given pipelineLayout verify that the setLayout at slot.first
2111//  has the requested binding at slot.second
2112static VkDescriptorSetLayoutBinding const * get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) {
2113
2114    if (!pipelineLayout)
2115        return nullptr;
2116
2117    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
2118        return nullptr;
2119
2120    auto const layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]];
2121
2122    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
2123    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
2124        return nullptr;
2125
2126    assert(bindingIt->second < layout_node->createInfo.bindingCount);
2127    return &layout_node->createInfo.pBindings[bindingIt->second];
2128}
2129
2130// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2131
2132static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2133
2134// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2135//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2136//   to that same cmd buffer by separate thread are not changing state from underneath us
2137// Track the last cmd buffer touched by this thread
2138
2139static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2140    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2141        if (pCB->drawCount[i])
2142            return VK_TRUE;
2143    }
2144    return VK_FALSE;
2145}
2146
2147// Check object status for selected flag state
2148static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2149                                DRAW_STATE_ERROR error_code, const char *fail_msg) {
2150    if (!(pNode->status & status_mask)) {
2151        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2152                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2153                       "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2154    }
2155    return VK_FALSE;
2156}
2157
2158// Retrieve pipeline node ptr for given pipeline object
2159static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
2160    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
2161        return NULL;
2162    }
2163    return my_data->pipelineMap[pipeline];
2164}
2165
2166// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
2167static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2168    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2169        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2170            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2171                return VK_TRUE;
2172        }
2173    }
2174    return VK_FALSE;
2175}
2176
2177// Validate state stored as flags at time of draw call
2178static VkBool32 validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe,
2179                                          VkBool32 indexedDraw) {
2180    VkBool32 result;
2181    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2182                             "Dynamic viewport state not set for this command buffer");
2183    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2184                              "Dynamic scissor state not set for this command buffer");
2185    if ((pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2186        (pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)) {
2187        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2188                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2189    }
2190    if (pPipe->rsStateCI.depthBiasEnable) {
2191        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2192                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2193    }
2194    if (pPipe->blendConstantsEnabled) {
2195        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2196                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2197    }
2198    if (pPipe->dsStateCI.depthBoundsTestEnable) {
2199        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2200                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2201    }
2202    if (pPipe->dsStateCI.stencilTestEnable) {
2203        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2204                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2205        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2206                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2207        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2208                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2209    }
2210    if (indexedDraw) {
2211        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2212                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2213                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2214    }
2215    return result;
2216}
2217
2218// Verify attachment reference compatibility according to spec
2219//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
2220//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2221//   to make sure that format and sample counts match.
2222//  If not, they are not compatible.
2223static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2224                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2225                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2226                                             const VkAttachmentDescription *pSecondaryAttachments) {
2227    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2228        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2229            return true;
2230    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2231        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2232            return true;
2233    } else { // format and sample count must match
2234        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2235             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2236            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2237             pSecondaryAttachments[pSecondary[index].attachment].samples))
2238            return true;
2239    }
2240    // Format and sample counts didn't match
2241    return false;
2242}
2243
2244// For given primary and secondary RenderPass objects, verify that they're compatible
2245static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2246                                            string &errorMsg) {
2247    stringstream errorStr;
2248    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2249        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2250        errorMsg = errorStr.str();
2251        return false;
2252    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2253        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2254        errorMsg = errorStr.str();
2255        return false;
2256    }
2257    // Trivial pass case is exact same RP
2258    if (primaryRP == secondaryRP) {
2259        return true;
2260    }
2261    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2262    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2263    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2264        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2265                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2266        errorMsg = errorStr.str();
2267        return false;
2268    }
2269    uint32_t spIndex = 0;
2270    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2271        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2272        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2273        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2274        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2275        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2276            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2277                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2278                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2279                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2280                errorMsg = errorStr.str();
2281                return false;
2282            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2283                                                         primaryColorCount, primaryRPCI->pAttachments,
2284                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2285                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2286                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2287                errorMsg = errorStr.str();
2288                return false;
2289            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2290                                                         primaryColorCount, primaryRPCI->pAttachments,
2291                                                         secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2292                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2293                errorStr << "depth/stencil attachments at index " << cIdx << " of subpass index " << spIndex
2294                         << " are not compatible.";
2295                errorMsg = errorStr.str();
2296                return false;
2297            }
2298        }
2299        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2300        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2301        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2302        for (uint32_t i = 0; i < inputMax; ++i) {
2303            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount,
2304                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2305                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2306                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2307                errorMsg = errorStr.str();
2308                return false;
2309            }
2310        }
2311    }
2312    return true;
2313}
2314
2315// For given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
2316static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
2317                                            const uint32_t layoutIndex, string &errorMsg) {
2318    stringstream errorStr;
2319    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
2320    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
2321        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2322        errorMsg = errorStr.str();
2323        return false;
2324    }
2325    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
2326        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
2327                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
2328                 << ", but you're attempting to bind set to index " << layoutIndex;
2329        errorMsg = errorStr.str();
2330        return false;
2331    }
2332    // Get the specific setLayout from PipelineLayout that overlaps this set
2333    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
2334    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
2335        return true;
2336    }
2337    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
2338    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
2339        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
2340                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
2341                 << " descriptors.";
2342        errorMsg = errorStr.str();
2343        return false; // trivial fail case
2344    }
2345    // Now need to check set against corresponding pipelineLayout to verify compatibility
2346    for (size_t i = 0; i < descriptorCount; ++i) {
2347        // Need to verify that layouts are identically defined
2348        //  TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
2349        //    do we also need to check immutable samplers?
2350        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
2351            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
2352                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
2353                     << "' but corresponding descriptor from pipelineLayout is type '"
2354                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
2355            errorMsg = errorStr.str();
2356            return false;
2357        }
2358        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
2359            errorStr << "stageFlags of descriptor " << i << " for descriptorSet being bound are " << pSet->pLayout->stageFlags[i]
2360                     << " but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
2361            errorMsg = errorStr.str();
2362            return false;
2363        }
2364    }
2365    return true;
2366}
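
// Illustrative usage sketch (compiled out; handles and the set index are
// hypothetical): how a caller checks a bound set against a pipeline layout and
// surfaces the errorMsg produced above.
#if 0
static void example_verify_set(layer_data *my_data, const SET_NODE *pSet,
                               VkPipelineLayout layout, uint32_t setIndex) {
    std::string errorMsg;
    if (!verify_set_layout_compatibility(my_data, pSet, layout, setIndex, errorMsg)) {
        LOGCONSOLE("set #%u incompatible: %s\n", setIndex, errorMsg.c_str());
    }
}
#endif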
2367
2368// Validate that data for each specialization entry is fully contained within the buffer.
2369static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
2370    VkBool32 pass = VK_TRUE;
2371
2372    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2373
2374    if (spec) {
2375        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2376            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2377                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2378                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2379                            "Specialization entry %u (for constant id %u) references memory outside provided "
2380                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2381                            " bytes provided)",
2382                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2383                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2384
2385                    pass = VK_FALSE;
2386                }
2387            }
2388        }
2389    }
2390
2391    return pass;
2392}
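
// Illustrative data sketch (compiled out; values are hypothetical): a
// specialization entry that the check above rejects -- offset 2 + size 4 runs
// past the 4 bytes provided, triggering SHADER_CHECKER_BAD_SPECIALIZATION.
#if 0
static void example_bad_specialization() {
    static const uint32_t data = 42;
    VkSpecializationMapEntry entry = {};
    entry.constantID = 0;
    entry.offset = 2;              // entry covers bytes 2..5 ...
    entry.size = sizeof(uint32_t);
    VkSpecializationInfo spec = {};
    spec.mapEntryCount = 1;
    spec.pMapEntries = &entry;
    spec.dataSize = sizeof(data);  // ... but only bytes 0..3 are provided
    spec.pData = &data;
    (void)spec;
}
#endif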
2393
2394static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
2395                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2396    auto type = module->get_def(type_id);
2397
2398    descriptor_count = 1;
2399
2400    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2401     * descriptor count for each dimension. */
2402    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2403        if (type.opcode() == spv::OpTypeArray) {
2404            descriptor_count *= get_constant_value(module, type.word(3));
2405            type = module->get_def(type.word(2));
2406        } else {
2408            type = module->get_def(type.word(3));
2409        }
2410    }
2411
2412    switch (type.opcode()) {
2413    case spv::OpTypeStruct: {
2414        for (auto insn : *module) {
2415            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2416                if (insn.word(2) == spv::DecorationBlock) {
2417                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2418                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2419                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2420                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2421                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2422                }
2423            }
2424        }
2425
2426        /* Invalid */
2427        return false;
2428    }
2429
2430    case spv::OpTypeSampler:
2431        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2432
2433    case spv::OpTypeSampledImage:
2434        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2435            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2436             * doesn't really have a sampler, and a texel buffer descriptor
2437             * doesn't really provide one. Allow this slight mismatch.
2438             */
2439            auto image_type = module->get_def(type.word(2));
2440            auto dim = image_type.word(3);
2441            auto sampled = image_type.word(7);
2442            return dim == spv::DimBuffer && sampled == 1;
2443        }
2444        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2445
2446    case spv::OpTypeImage: {
2447        /* Many descriptor types can back an image type; which one depends on the
2448         * dimension and on whether the image will be used with a sampler. SPIR-V
2449         * for Vulkan requires that sampled be 1 or 2 -- deferring the decision to
2450         * runtime is not allowed.
2451         */
2452        auto dim = type.word(3);
2453        auto sampled = type.word(7);
2454
2455        if (dim == spv::DimSubpassData) {
2456            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2457        } else if (dim == spv::DimBuffer) {
2458            if (sampled == 1) {
2459                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2460            } else {
2461                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2462            }
2463        } else if (sampled == 1) {
2464            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2465        } else {
2466            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2467        }
2468    }
2469
2470    /* We shouldn't really see any other junk types -- but if we do, they're
2471     * a mismatch.
2472     */
2473    default:
2474        return false; /* Mismatch */
2475    }
2476}
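
// Summary of the mapping implemented above (GLSL names shown only for
// orientation; they are not derived from this code):
//   OpTypeStruct + Block       -> UNIFORM_BUFFER{,_DYNAMIC}       (uniform Ubo {...})
//   OpTypeStruct + BufferBlock -> STORAGE_BUFFER{,_DYNAMIC}       (buffer Ssbo {...})
//   OpTypeSampler              -> SAMPLER                         (sampler)
//   OpTypeSampledImage         -> COMBINED_IMAGE_SAMPLER          (sampler2D)
//     ...except DimBuffer/sampled==1, accepted as UNIFORM_TEXEL_BUFFER (samplerBuffer)
//   OpTypeImage DimSubpassData -> INPUT_ATTACHMENT                (subpassInput)
//   OpTypeImage DimBuffer      -> UNIFORM/STORAGE_TEXEL_BUFFER by sampled==1/2
//   OpTypeImage, sampled == 1  -> SAMPLED_IMAGE                   (texture2D)
//   OpTypeImage, sampled == 2  -> STORAGE_IMAGE                   (image2D)
// Array types multiply descriptor_count per dimension; pointer levels are skipped.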
2477
2478static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
2479    if (!feature) {
2480        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2481                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2482                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
2483                    "enabled on the device",
2484                    feature_name)) {
2485            return false;
2486        }
2487    }
2488
2489    return true;
2490}
2491
2492 static VkBool32 validate_shader_capabilities(layer_data *my_data, shader_module const *src) {
2494    VkBool32 pass = VK_TRUE;
2495
2496    auto enabledFeatures = &my_data->physDevProperties.features;
2497
2498    for (auto insn : *src) {
2499        if (insn.opcode() == spv::OpCapability) {
2500            switch (insn.word(1)) {
2501            case spv::CapabilityMatrix:
2502            case spv::CapabilityShader:
2503            case spv::CapabilityInputAttachment:
2504            case spv::CapabilitySampled1D:
2505            case spv::CapabilityImage1D:
2506            case spv::CapabilitySampledBuffer:
2507            case spv::CapabilityImageBuffer:
2508            case spv::CapabilityImageQuery:
2509            case spv::CapabilityDerivativeControl:
2510                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2511                break;
2512
2513            case spv::CapabilityGeometry:
2514                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
2515                break;
2516
2517            case spv::CapabilityTessellation:
2518                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
2519                break;
2520
2521            case spv::CapabilityFloat64:
2522                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2523                break;
2524
2525            case spv::CapabilityInt64:
2526                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
2527                break;
2528
2529            case spv::CapabilityTessellationPointSize:
2530            case spv::CapabilityGeometryPointSize:
2531                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2532                                        "shaderTessellationAndGeometryPointSize");
2533                break;
2534
2535            case spv::CapabilityImageGatherExtended:
2536                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2537                break;
2538
2539            case spv::CapabilityStorageImageMultisample:
2540                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2541                break;
2542
2543            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2544                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2545                                        "shaderUniformBufferArrayDynamicIndexing");
2546                break;
2547
2548            case spv::CapabilitySampledImageArrayDynamicIndexing:
2549                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2550                                        "shaderSampledImageArrayDynamicIndexing");
2551                break;
2552
2553            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2554                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2555                                        "shaderStorageBufferArrayDynamicIndexing");
2556                break;
2557
2558            case spv::CapabilityStorageImageArrayDynamicIndexing:
2559                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2560                                        "shaderStorageImageArrayDynamicIndexing");
2561                break;
2562
2563            case spv::CapabilityClipDistance:
2564                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2565                break;
2566
2567            case spv::CapabilityCullDistance:
2568                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2569                break;
2570
2571            case spv::CapabilityImageCubeArray:
2572                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2573                break;
2574
2575            case spv::CapabilitySampleRateShading:
2576                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2577                break;
2578
2579            case spv::CapabilitySparseResidency:
2580                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2581                break;
2582
2583            case spv::CapabilityMinLod:
2584                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2585                break;
2586
2587            case spv::CapabilitySampledCubeArray:
2588                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2589                break;
2590
2591            case spv::CapabilityImageMSArray:
2592                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2593                break;
2594
2595            case spv::CapabilityStorageImageExtendedFormats:
2596                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
2597                                        "shaderStorageImageExtendedFormats");
2598                break;
2599
2600            case spv::CapabilityInterpolationFunction:
2601                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2602                break;
2603
2604            case spv::CapabilityStorageImageReadWithoutFormat:
2605                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2606                                        "shaderStorageImageReadWithoutFormat");
2607                break;
2608
2609            case spv::CapabilityStorageImageWriteWithoutFormat:
2610                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2611                                        "shaderStorageImageWriteWithoutFormat");
2612                break;
2613
2614            case spv::CapabilityMultiViewport:
2615                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
2616                break;
2617
2618            default:
2619                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2620                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2621                            "Shader declares capability %u, which is not supported in Vulkan.",
2622                            insn.word(1)))
2623                    pass = VK_FALSE;
2624                break;
2625            }
2626        }
2627    }
2628
2629    return pass;
2630}
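
// Illustrative sketch (compiled out; handles are hypothetical and the device
// creation plumbing is omitted): enabling the device feature that satisfies
// spv::CapabilityGeometry in the check above.
#if 0
static void example_enable_geometry(VkPhysicalDevice gpu, VkDeviceCreateInfo *ci) {
    static VkPhysicalDeviceFeatures supported = {}, enabled = {};
    vkGetPhysicalDeviceFeatures(gpu, &supported);
    if (supported.geometryShader)
        enabled.geometryShader = VK_TRUE; // shaders may now declare OpCapability Geometry
    ci->pEnabledFeatures = &enabled;
}
#endif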
2631
2634static VkBool32 validate_pipeline_shader_stage(layer_data *dev_data,
2635                                               VkPipelineShaderStageCreateInfo const *pStage,
2636                                               PIPELINE_NODE *pipeline,
2637                                               PIPELINE_LAYOUT_NODE *pipelineLayout,
2638                                               shader_module **out_module,
2639                                               spirv_inst_iter *out_entrypoint)
2640{
2641    VkBool32 pass = VK_TRUE;
2642    auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get();
2643    pass &= validate_specialization_offsets(dev_data, pStage);
2644
2645    /* find the entrypoint */
2646    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2647    if (entrypoint == module->end()) {
2648        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2649                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2650                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2651                    string_VkShaderStageFlagBits(pStage->stage))) {
2652            pass = VK_FALSE;
2653        }
2654    }
2655
2656    /* validate shader capabilities against enabled device features */
2657    pass &= validate_shader_capabilities(dev_data, module);
2658
2659    /* mark accessible ids */
2660    std::unordered_set<uint32_t> accessible_ids;
2661    mark_accessible_ids(module, entrypoint, accessible_ids);
2662
2663    /* validate descriptor set layout against what the entrypoint actually uses */
2664    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2665    collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses);
2666
2667    /* validate push constant usage */
2668    pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges,
2669                                        module, accessible_ids, pStage->stage);
2670
2671    /* validate descriptor use */
2672    for (auto use : descriptor_uses) {
2673        // While validating shaders capture which slots are used by the pipeline
2674        pipeline->active_slots[use.first.first].insert(use.first.second);
2675
2676        /* find the matching binding */
2677        auto binding = get_descriptor_binding(dev_data, pipelineLayout, use.first);
2678        unsigned required_descriptor_count;
2679
2680        if (!binding) {
2681            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2682                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2683                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2684                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2685                pass = VK_FALSE;
2686            }
2687        } else if (~binding->stageFlags & pStage->stage) {
2688            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2689                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2690                        "Shader uses descriptor slot %u.%u (used "
2691                        "as type `%s`) but descriptor not "
2692                        "accessible from stage %s",
2693                        use.first.first, use.first.second,
2694                        describe_type(module, use.second.type_id).c_str(),
2695                        string_VkShaderStageFlagBits(pStage->stage))) {
2696                pass = VK_FALSE;
2697            }
2698        } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) {
2699            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2700                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2701                        "Type mismatch on descriptor slot "
2702                        "%u.%u (used as type `%s`) but "
2703                        "bound descriptor is of type %s",
2704                        use.first.first, use.first.second,
2705                        describe_type(module, use.second.type_id).c_str(),
2706                        string_VkDescriptorType(binding->descriptorType))) {
2707                pass = VK_FALSE;
2708            }
2709        } else if (binding->descriptorCount < required_descriptor_count) {
2710            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2711                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2712                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2713                        required_descriptor_count, use.first.first, use.first.second,
2714                        describe_type(module, use.second.type_id).c_str(),
2715                        binding->descriptorCount)) {
2716                pass = VK_FALSE;
2717            }
2718        }
2719    }
2720
2721    return pass;
2722}
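
// Illustrative sketch (compiled out; the module handle is hypothetical): the
// minimal stage description the checks above operate on -- module handle, stage
// bit, and the entrypoint name that find_entrypoint() resolves in the SPIR-V.
#if 0
static VkPipelineShaderStageCreateInfo example_stage(VkShaderModule module) {
    VkPipelineShaderStageCreateInfo stage = {};
    stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    stage.stage = VK_SHADER_STAGE_VERTEX_BIT;
    stage.module = module;
    stage.pName = "main";
    return stage;
}
#endif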
2723
2724
2725 // Validate the shaders used by the given pipeline and store the descriptor slots
2726 //  actually used by the pipeline into pPipeline->active_slots
2727static VkBool32 validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2728    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
2729    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2730    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2731
2732    shader_module *shaders[5];
2733    memset(shaders, 0, sizeof(shaders));
2734    spirv_inst_iter entrypoints[5];
2735    memset(entrypoints, 0, sizeof(entrypoints));
2736    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2737    VkBool32 pass = VK_TRUE;
2738
2739    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2740
2741    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2742        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
2743        auto stage_id = get_shader_stage_id(pStage->stage);
2744        pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout,
2745                                               &shaders[stage_id], &entrypoints[stage_id]);
2746    }
2747
2748    vi = pCreateInfo->pVertexInputState;
2749
2750    if (vi) {
2751        pass = validate_vi_consistency(my_data, vi) && pass;
2752    }
2753
2754    if (shaders[vertex_stage]) {
2755        pass = validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]) && pass;
2756    }
2757
2758    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2759    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2760
2761    while (!shaders[producer] && producer != fragment_stage) {
2762        producer++;
2763        consumer++;
2764    }
2765
2766    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2767        assert(shaders[producer]);
2768        if (shaders[consumer]) {
2769            pass = validate_interface_between_stages(my_data, shaders[producer], entrypoints[producer],
2770                                                     shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
2771                                                     shader_stage_attribs[consumer].name,
2772                                                     shader_stage_attribs[consumer].arrayed_input) &&
2773                   pass;
2774
2775            producer = consumer;
2776        }
2777    }
2778
2779    auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr;
2780
2781    if (shaders[fragment_stage] && rp) {
2782        pass = validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp,
2783                                                       pCreateInfo->subpass) &&
2784               pass;
2785    }
2786
2787    return pass;
2788}
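
// Worked example of the stage walk above: interfaces are matched in pipeline
// order, skipping absent stages. E.g. with only VS, GS, and FS present,
// producer starts at VS; consumer scans TCS and TES (absent) and lands on GS,
// so VS->GS is checked; producer then becomes GS and GS->FS is checked.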
2789
2790// Return Set node ptr for specified set or else NULL
2791static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
2792    if (my_data->setMap.find(set) == my_data->setMap.end()) {
2793        return NULL;
2794    }
2795    return my_data->setMap[set];
2796}
2797
2798// For given Layout Node and binding, return index where that binding begins
2799static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2800    uint32_t offsetIndex = 0;
2801    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2802        if (pLayout->createInfo.pBindings[i].binding == binding)
2803            break;
2804        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2805    }
2806    return offsetIndex;
2807}
2808
2809// For given layout node and binding, return last index that is updated
2810static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2811    uint32_t offsetIndex = 0;
2812    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2813        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2814        if (pLayout->createInfo.pBindings[i].binding == binding)
2815            break;
2816    }
2817    return offsetIndex - 1;
2818}
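
// Worked example for the two helpers above, with a hypothetical layout of three
// bindings: binding 0 holds 2 descriptors, binding 1 holds 3, binding 2 holds 1.
// Indices into the flattened descriptor array:
//   getBindingStartIndex(pLayout, 1) == 2  (indices 0..1 belong to binding 0)
//   getBindingEndIndex(pLayout, 1)   == 4  (binding 1 occupies indices 2..4)
//   getBindingEndIndex(pLayout, 2)   == 5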
2819
2820 // For the given command buffer, verify that every dynamic descriptor in each set of
2821 //  activeSetBindingsPairs has a valid dynamic offset bound. To be valid, the dynamic
2822 //  offset combined with the offset and range from its descriptor update must not
2823 //  overflow the size of the buffer being updated
2824static VkBool32 validate_dynamic_offsets(layer_data *my_data, const GLOBAL_CB_NODE *pCB,
2825                                         const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
2826    VkBool32 result = VK_FALSE;
2827
2828    VkWriteDescriptorSet *pWDS = NULL;
2829    uint32_t dynOffsetIndex = 0;
2830    VkDeviceSize bufferSize = 0;
2831    for (auto set_bindings_pair : activeSetBindingsPairs) {
2832        SET_NODE *set_node = set_bindings_pair.first;
2833        LAYOUT_NODE *layout_node = set_node->pLayout;
2834        for (auto binding : set_bindings_pair.second) {
2835            uint32_t startIdx = getBindingStartIndex(layout_node, binding);
2836            uint32_t endIdx = getBindingEndIndex(layout_node, binding);
2837            for (uint32_t i = startIdx; i <= endIdx; ++i) {
2838                // TODO : Flag error here if set_node->pDescriptorUpdates[i] is NULL
2839                switch (set_node->pDescriptorUpdates[i]->sType) {
2840                case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
2841                    pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];
2842                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
2843                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
2844                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2845                            bufferSize = my_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
2846                            uint32_t dynOffset = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
2847                            if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
2848                                if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
2849                                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2850                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2851                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2852                                                      DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2853                                                      "VkDescriptorSet (%#" PRIxLEAST64 ") descriptor #%u has a range of "
2854                                                      "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32 " "
2855                                                      "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
2856                                                      ") which has a size of %#" PRIxLEAST64 ".",
2857                                                      reinterpret_cast<const uint64_t &>(set_node->set), i,
2858                                                      dynOffset, pWDS->pBufferInfo[j].offset,
2859                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2860                                }
2861                            } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
2862                                result |= log_msg(
2863                                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2864                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2865                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW,
2866                                    "DS",
2867                                    "VkDescriptorSet (%#" PRIxLEAST64 ") descriptor #%u has dynamic offset %#" PRIxLEAST32 ". "
2868                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
2869                                    " from its update, this oversteps its buffer "
2870                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
2871                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
2872                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
2873                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2887                            }
2888                            dynOffsetIndex++;
2889                        }
2890                        // Advance i past this write's descriptors (++i in the enclosing loop then moves one index past the last)
2891                        i += pWDS->descriptorCount - 1;
2892                    }
2893                    break;
2894                default: // Currently only shadowing Write update nodes so shouldn't get here
2895                    assert(0);
2896                    continue;
2897                }
2898            }
2899        }
2900    }
2901    return result;
2902}
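
// Worked example of the overflow rule above (hypothetical sizes): for a 256-byte
// buffer updated with offset 64 and range 128, the largest legal dynamic offset
// is 64, since 64 (dynamic) + 64 (offset) + 128 (range) == 256; a dynamic offset
// of 128 would overstep the buffer by 64 bytes. When the update's range is
// VK_WHOLE_SIZE, only dynamic offset + offset is checked against the buffer size.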
2903
2904// Validate overall state at the time of a draw call
2905static VkBool32 validate_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
2906    PIPELINE_NODE *pPipe = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
2907    // First check flag states
2908    VkBool32 result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2909    // Now complete other state checks
2910    // TODO : Currently only performing the next check if *something* was bound (non-zero last bound).
2911    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound.
2912    //  We should have that check separately and then gate this check based on it.
2913    if (pPipe) {
2914        auto const &state = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS];
2915        if (state.pipelineLayout) {
2916            string errorString;
2917            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2918            vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
2919            for (auto setBindingPair : pPipe->active_slots) {
2920                uint32_t setIndex = setBindingPair.first;
2921                // If a valid set is not bound, flag an error
2922                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2923                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2924                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2925                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
2926                                      (uint64_t)pPipe->pipeline, setIndex);
2927                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
2928                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
2929                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2930                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
2931                    result |= log_msg(
2932                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2933                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2934                        "VkDescriptorSet (%#" PRIxLEAST64
2935                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
2936                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
2937                } else { // Valid set is bound and layout compatible, validate that it's updated and verify any dynamic offsets
2938                    // Pull the set node
2939                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
2940                    // Save vector of all active sets to verify dynamicOffsets below
2942                    activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second));
2943                    // Make sure set has been updated
2944                    if (!pSet->pUpdateStructs) {
2945                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2946                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
2947                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2948                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2949                                                              "this will result in undefined behavior.",
2950                                          (uint64_t)pSet->set);
2951                    }
2952                }
2953            }
2954            // For each dynamic descriptor, make sure dynamic offset doesn't overstep buffer
2955            if (!state.dynamicOffsets.empty())
2956                result |= validate_dynamic_offsets(my_data, pCB, activeSetBindingsPairs);
2957        }
2958        // Verify Vtx binding
2959        if (pPipe->vertexBindingDescriptions.size() > 0) {
2960            for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
2961                if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2962                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2963                                      __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2964                                      "The Pipeline State Object (%#" PRIxLEAST64
2965                                      ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
2966                                      " should be set via vkCmdBindVertexBuffers.",
2967                                      (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline, i);
2968                }
2969            }
2970        } else {
2971            if (!pCB->currentDrawData.buffers.empty()) {
2972                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2973                                  0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2974                                  "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
2975                                  ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
2976                                  (uint64_t)pCB->commandBuffer, (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
2977            }
2978        }
2979        // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2980        // Skip check if rasterization is disabled or there is no viewport.
2981        if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
2982             !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
2983            pPipe->graphicsPipelineCI.pViewportState) {
2984            VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
2985            VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
2986            if (dynViewport) {
2987                if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
2988                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2989                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2990                                      "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2991                                      ", but PSO viewportCount is %u. These counts must match.",
2992                                      pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
2993                }
2994            }
2995            if (dynScissor) {
2996                if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
2997                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2998                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2999                                      "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
3000                                      ", but PSO scissorCount is %u. These counts must match.",
3001                                      pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
3002                }
3003            }
3004        }
3005    }
3006    return result;
3007}
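
// Illustrative sketch (compiled out; the handle and count are hypothetical):
// keeping the dynamic viewport count in step with the PSO, per the
// viewport/scissor count check above.
#if 0
static void example_set_viewports(VkCommandBuffer cb, uint32_t psoViewportCount) {
    std::vector<VkViewport> vps(psoViewportCount); // must equal the PSO's viewportCount
    vkCmdSetViewport(cb, 0, psoViewportCount, vps.data());
}
#endif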
3008
3009// Verify that create state for a pipeline is valid
3010static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3011                                          int pipelineIndex) {
3012    VkBool32 skipCall = VK_FALSE;
3013
3014    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3015
3016    // If create derivative bit is set, check that we've specified a base
3017    // pipeline correctly, and that the base pipeline was created to allow
3018    // derivatives.
3019    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3020        PIPELINE_NODE *pBasePipeline = nullptr;
3021        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3022              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3023            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3024                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3025                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3026        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3027            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3028                skipCall |=
3029                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3030                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3031                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3032            } else {
3033                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3034            }
3035        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3036            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3037        }
3038
3039        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3040            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3041                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3042                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3043        }
3044    }
3045
3046    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3047        if (!my_data->physDevProperties.features.independentBlend) {
3048            if (pPipeline->attachments.size() > 1) {
3049                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3050                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3051                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3052                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3053                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3054                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3055                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3056                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3057                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3058                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3059                        skipCall |=
3060                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3061                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If the independent blend feature is not "
3062                            "enabled, all elements of pAttachments must be identical");
3063                    }
3064                }
3065            }
3066        }
3067        if (!my_data->physDevProperties.features.logicOp &&
3068            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3069            skipCall |=
3070                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3071                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3072                        "Invalid Pipeline CreateInfo: If the logic operations feature is not enabled, logicOpEnable must be VK_FALSE");
3073        }
3074        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3075            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3076             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3077            skipCall |=
3078                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3079                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3080                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3081        }
3082    }
3083
3084    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3085    // produces nonsense errors that confuse users. Other layers should already
3086    // emit errors for renderpass being invalid.
3087    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
3088    if (rp_data != my_data->renderPassMap.end() &&
3089        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
3090        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3091                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3092                                                                           "is out of range for this renderpass (0..%u)",
3093                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
3094    }
3095
3096    if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) {
3097        skipCall = VK_TRUE;
3098    }
3099    // VS is required
3100    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3101        skipCall |=
3102            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3103                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3104    }
3105    // Either both or neither TC/TE shaders should be defined
3106    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3107        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3108        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3109                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3110                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3111    }
3112    // Compute shaders should be specified independent of Gfx shaders
3113    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3114        (pPipeline->active_shaders &
3115         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3116          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3117        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3118                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3119                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3120    }
3121    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3122    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3123    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3124        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3125        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3126                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3127                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3128                                                                           "topology for tessellation pipelines");
3129    }
3130    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3131        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3132            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3133                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3134                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3135                                                                               "topology is only valid for tessellation pipelines");
3136        }
3137        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
3138            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3139                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3140                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3141                                                                               "topology used with patchControlPoints value %u."
3142                                                                               " patchControlPoints should be >0 and <=32.",
3143                                pPipeline->tessStateCI.patchControlPoints);
3144        }
3145    }
3146    // Viewport state must be included if rasterization is enabled.
3147    // If the viewport state is included, the viewport and scissor counts should always match.
3148    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3149    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3150        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3151        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3152            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3153                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3154                                                                           "and scissors are dynamic, the PSO must include "
3155                                                                           "viewportCount and scissorCount in pViewportState.");
3156        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3157                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3158            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3159                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3160                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3161                                pPipeline->vpStateCI.viewportCount, pPipeline->vpStateCI.scissorCount);
3162        } else {
3163            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3164            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3165            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3166            if (!dynViewport) {
3167                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3168                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3169                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3170                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3171                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3172                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3173                                        "vkCmdSetViewport().",
3174                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3175                }
3176            }
3177            if (!dynScissor) {
3178                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3179                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3180                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3181                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3182                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3183                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3184                                        "vkCmdSetScissor().",
3185                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3186                }
3187            }
3188        }
3189    }
3190    return skipCall;
3191}
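
// Illustrative sketch (compiled out; the array is hypothetical): a base/derivative
// pair that passes the derivative checks above -- the base allows derivatives and
// precedes the derivative in the same create call, which names it by index, not
// by handle.
#if 0
static void example_derivative_pair(VkGraphicsPipelineCreateInfo ci[2]) {
    ci[0].flags |= VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
    ci[1].flags |= VK_PIPELINE_CREATE_DERIVATIVE_BIT;
    ci[1].basePipelineIndex = 0;               // earlier element of the same array
    ci[1].basePipelineHandle = VK_NULL_HANDLE; // exactly one of index/handle is set
}
#endif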
3192
3193 // Init the pipeline mapping info based on the pipeline create info and its embedded pointers
3194 //  Threading note : Calls to this function should be wrapped in a mutex
3195// TODO : this should really just be in the constructor for PIPELINE_NODE
3196static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
3197    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;
3198
3199    // First init create info
3200    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
3201
3202    size_t bufferSize = 0;
3203    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
3204    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;
3205
3206    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
3207        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
3208
3209        switch (pPSSCI->stage) {
3210        case VK_SHADER_STAGE_VERTEX_BIT:
3211            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3212            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
3213            break;
3214        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
3215            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3216            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
3217            break;
3218        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
3219            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3220            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
3221            break;
3222        case VK_SHADER_STAGE_GEOMETRY_BIT:
3223            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3224            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
3225            break;
3226        case VK_SHADER_STAGE_FRAGMENT_BIT:
3227            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3228            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
3229            break;
3230        case VK_SHADER_STAGE_COMPUTE_BIT:
3231            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
3232            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
3233            break;
3234        default:
3235            // TODO : Flag error
3236            break;
3237        }
3238    }
3239    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
3240    if (pCreateInfo->stageCount != 0) {
3241        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
3242        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
3243        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
3244    }
3245    if (pCreateInfo->pVertexInputState != NULL) {
3246        pPipeline->vertexInputCI = *pCreateInfo->pVertexInputState;
3247        // Copy embedded ptrs
3248        pVICI = pCreateInfo->pVertexInputState;
3249        if (pVICI->vertexBindingDescriptionCount) {
3250            pPipeline->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
3251                pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
3252        }
3253        if (pVICI->vertexAttributeDescriptionCount) {
3254            pPipeline->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>(
3255                pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
3256        }
3257        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
3258    }
3259    if (pCreateInfo->pInputAssemblyState != NULL) {
3260        pPipeline->iaStateCI = *pCreateInfo->pInputAssemblyState;
3261        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
3262    }
3263    if (pCreateInfo->pTessellationState != NULL) {
3264        pPipeline->tessStateCI = *pCreateInfo->pTessellationState;
3265        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
3266    }
3267    if (pCreateInfo->pViewportState != NULL) {
3268        pPipeline->vpStateCI = *pCreateInfo->pViewportState;
3269        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
3270    }
3271    if (pCreateInfo->pRasterizationState != NULL) {
3272        pPipeline->rsStateCI = *pCreateInfo->pRasterizationState;
3273        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
3274    }
3275    if (pCreateInfo->pMultisampleState != NULL) {
3276        pPipeline->msStateCI = *pCreateInfo->pMultisampleState;
3277        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
3278    }
3279    if (pCreateInfo->pDepthStencilState != NULL) {
3280        pPipeline->dsStateCI = *pCreateInfo->pDepthStencilState;
3281        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
3282    }
3283    if (pCreateInfo->pColorBlendState != NULL) {
3284        pPipeline->cbStateCI = *pCreateInfo->pColorBlendState;
3285        // Copy embedded ptrs
3286        pCBCI = pCreateInfo->pColorBlendState;
3287        if (pCBCI->attachmentCount) {
3288            pPipeline->attachments = std::vector<VkPipelineColorBlendAttachmentState>(
3289                pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
3290        }
3291        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
3292    }
3293    if (pCreateInfo->pDynamicState != NULL) {
3294        pPipeline->dynStateCI = *pCreateInfo->pDynamicState;
3295        if (pPipeline->dynStateCI.dynamicStateCount) {
3296            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
3297            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
3298            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
3299        }
3300        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
3301    }
3302    return pPipeline;
3303}
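
// Illustrative caller sketch (an assumption, not the dispatch code itself): a
// vkCreateGraphicsPipelines intercept would typically record the node like so,
// using the same globalLock convention as the rest of this file:
//   loader_platform_thread_lock_mutex(&globalLock);
//   PIPELINE_NODE *pNode = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
//   pNode->pipeline = pPipelines[i]; // assumes PIPELINE_NODE keeps its handle in 'pipeline'
//   dev_data->pipelineMap[pNode->pipeline] = pNode;
//   loader_platform_thread_unlock_mutex(&globalLock);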
3304
3305// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
3309    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
3310        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
3311            delete[](*ii).second->graphicsPipelineCI.pStages;
3312        }
3313        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
3314            delete[](*ii).second->dynStateCI.pDynamicStates;
3315        }
3316        delete (*ii).second;
3317    }
3318    my_data->pipelineMap.clear();
3319}
3320
// For given pipeline, return number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA disabled
static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
    // Guard against an unknown pipeline handle, for which operator[] inserts a NULL node
    if (pPipe && VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
3325        return pPipe->msStateCI.rasterizationSamples;
3326    }
3327    return VK_SAMPLE_COUNT_1_BIT;
3328}
3329
3330// Validate state related to the PSO
3331static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
3332                                      const VkPipeline pipeline) {
3333    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
3334        // Verify that any MSAA request in PSO matches sample# in bound FB
3335        // Skip the check if rasterization is disabled.
3336        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
3337        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3338            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3339            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
3340            if (pCB->activeRenderPass) {
3341                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
3342                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
3343                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3344                uint32_t i;
3345
3346                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3347                    VkSampleCountFlagBits samples;
3348
3349                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3350                        continue;
3351
3352                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3353                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3354                        subpassNumSamples = samples;
3355                    } else if (subpassNumSamples != samples) {
3356                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3357                        break;
3358                    }
3359                }
3360                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3361                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3362                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3363                        subpassNumSamples = samples;
3364                    else if (subpassNumSamples != samples)
3365                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3366                }
3367
3368                if (psoNumSamples != subpassNumSamples) {
3369                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3370                                   (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3371                                   "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
                                   ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") uses %u samples!",
3373                                   (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3374                }
3375            } else {
3376                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3377                //   Verify and flag error as appropriate
3378            }
3379        }
3380        // TODO : Add more checks here
3381    } else {
3382        // TODO : Validate non-gfx pipeline updates
3383    }
3384    return VK_FALSE;
3385}
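
// Illustrative example of the rule enforced above (not executed): the pipeline's
// rasterizationSamples must match the sample count of every attachment used by the
// active subpass, e.g. a PSO created with
//   VkPipelineMultisampleStateCreateInfo ms = {};
//   ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
//   ms.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT;
// may only be bound while the current subpass's color/depth attachments were all
// created with VkAttachmentDescription::samples == VK_SAMPLE_COUNT_4_BIT.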
3386
// The block of code below is specifically for managing/tracking descriptor sets (DSs)
3388
3389// Return Pool node ptr for specified pool or else NULL
3390static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3391    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3392        return NULL;
3393    }
3394    return my_data->descriptorPoolMap[pool];
3395}
3396
3397static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
3398    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
3399        return NULL;
3400    }
3401    return my_data->descriptorSetLayoutMap[layout];
3402}
3403
// Return VK_FALSE if the update struct is of a valid type, otherwise flag an error and return the callback's skip code
3405static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3406    switch (pUpdateStruct->sType) {
3407    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3408    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3409        return VK_FALSE;
3410    default:
3411        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3412                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3413                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3414                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3415    }
3416}
3417
// Return the descriptor count for the given update struct
// Unknown struct types return 0; the struct type itself is validated by validUpdateStruct()
3420static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3421    switch (pUpdateStruct->sType) {
3422    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3423        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3424    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3425        // TODO : Need to understand this case better and make sure code is correct
3426        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3427    default:
3428        return 0;
    }
}
3432
3433// For given layout and update, return the first overall index of the layout that is updated
3434static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3435                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3436    return getBindingStartIndex(pLayout, binding) + arrayIndex;
3437}
3438
3439// For given layout and update, return the last overall index of the layout that is updated
3440static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3441                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3442    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3443    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
3444}
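
// Worked example (illustrative): for a layout whose binding 0 holds 4 descriptors
// and binding 1 holds 2, a write to binding 1 at dstArrayElement 0 with
// descriptorCount 2 spans overall layout indices [4,5]:
//   getUpdateStartIndex(...) == getBindingStartIndex(pLayout, 1) + 0         == 4
//   getUpdateEndIndex(...)   == getBindingStartIndex(pLayout, 1) + 0 + 2 - 1 == 5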
3445
3446// Verify that the descriptor type in the update struct matches what's expected by the layout
3447static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
3448                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3449    // First get actual type of update
3450    VkBool32 skipCall = VK_FALSE;
3451    VkDescriptorType actualType;
3452    uint32_t i = 0;
3453    switch (pUpdateStruct->sType) {
3454    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3455        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3456        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return VK_FALSE;
    default:
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
        // actualType was never set on this path, so don't fall into the per-descriptor checks below
        return skipCall;
    }
3467    if (VK_FALSE == skipCall) {
3468        // Set first stageFlags as reference and verify that all other updates match it
3469        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
3470        for (i = startIndex; i <= endIndex; i++) {
3471            if (pLayout->descriptorTypes[i] != actualType) {
3472                skipCall |= log_msg(
3473                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3474                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3475                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3476                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
3477            }
3478            if (pLayout->stageFlags[i] != refStageFlags) {
3479                skipCall |= log_msg(
3480                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3481                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
3482                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
3483                    refStageFlags, pLayout->stageFlags[i]);
3484            }
3485        }
3486    }
3487    return skipCall;
3488}
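
// Example of the consistency rule above (illustrative): if layout indices [4,5]
// were declared as VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER for VK_SHADER_STAGE_VERTEX_BIT,
// then a VkWriteDescriptorSet with descriptorType VK_DESCRIPTOR_TYPE_SAMPLER that
// overlaps those indices triggers DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, and an update
// spanning indices with differing stageFlags triggers DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH.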
3489
3490// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3491//   struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
3492// NOTE : Calls to this function should be wrapped in mutex
3493static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3494    VkBool32 skipCall = VK_FALSE;
3495    VkWriteDescriptorSet *pWDS = NULL;
3496    VkCopyDescriptorSet *pCDS = NULL;
3497    switch (pUpdate->sType) {
3498    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3499        pWDS = new VkWriteDescriptorSet;
3500        *pNewNode = (GENERIC_HEADER *)pWDS;
3501        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3502
3503        switch (pWDS->descriptorType) {
3504        case VK_DESCRIPTOR_TYPE_SAMPLER:
3505        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3506        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3507        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3508            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3509            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3510            pWDS->pImageInfo = info;
3511        } break;
3512        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3513        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3514            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3515            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3516            pWDS->pTexelBufferView = info;
3517        } break;
3518        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3519        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3520        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3521        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3522            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3523            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3524            pWDS->pBufferInfo = info;
3525        } break;
        default:
            // Unknown descriptor type: release the partial shadow copy and signal an error
            delete pWDS;
            *pNewNode = NULL;
            return VK_TRUE;
3529        }
3530        break;
3531    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3532        pCDS = new VkCopyDescriptorSet;
3533        *pNewNode = (GENERIC_HEADER *)pCDS;
3534        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3535        break;
3536    default:
3537        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3538                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3539                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3540                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3541            return VK_TRUE;
3542    }
    // Make sure that pNext for the end of the shadow copy is NULL
    // (*pNewNode is left unset on the unrecognized-type path above, so guard the dereference)
    if (*pNewNode) {
        (*pNewNode)->pNext = NULL;
    }
3545    return skipCall;
3546}
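
// Illustrative motivation for the deep copy above (hypothetical app-side handles):
// the app may free its info arrays as soon as vkUpdateDescriptorSets() returns,
// so the shadow copy must own its own pImageInfo/pBufferInfo/pTexelBufferView storage:
//   VkDescriptorBufferInfo info = {buffer, 0, VK_WHOLE_SIZE};
//   VkWriteDescriptorSet w = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, NULL, set,
//                             0, 0, 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, NULL, &info, NULL};
//   vkUpdateDescriptorSets(device, 1, &w, 0, NULL);
//   // 'info' may now go out of scope; the layer's shadow copy must not point at it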
3547
3548// Verify that given sampler is valid
3549static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
3550    VkBool32 skipCall = VK_FALSE;
3551    auto sampIt = my_data->sampleMap.find(*pSampler);
3552    if (sampIt == my_data->sampleMap.end()) {
3553        if (!immutable) {
3554            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3555                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3556                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3557                                (uint64_t)*pSampler);
3558        } else { // immutable
3559            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3560                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3561                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3562                                "sampler %#" PRIxLEAST64,
3563                                (uint64_t)*pSampler);
3564        }
3565    } else {
3566        // TODO : Any further checks we want to do on the sampler?
3567    }
3568    return skipCall;
3569}
3570
3571// find layout(s) on the cmd buf level
3572bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3573    ImageSubresourcePair imgpair = {image, true, range};
3574    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3575    if (imgsubIt == pCB->imageLayoutMap.end()) {
3576        imgpair = {image, false, VkImageSubresource()};
3577        imgsubIt = pCB->imageLayoutMap.find(imgpair);
3578        if (imgsubIt == pCB->imageLayoutMap.end())
3579            return false;
3580    }
3581    node = imgsubIt->second;
3582    return true;
3583}
3584
3585// find layout(s) on the global level
3586bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3587    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3588    if (imgsubIt == my_data->imageLayoutMap.end()) {
3589        imgpair = {imgpair.image, false, VkImageSubresource()};
3590        imgsubIt = my_data->imageLayoutMap.find(imgpair);
3591        if (imgsubIt == my_data->imageLayoutMap.end())
3592            return false;
3593    }
3594    layout = imgsubIt->second.layout;
3595    return true;
3596}
3597
3598bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3599    ImageSubresourcePair imgpair = {image, true, range};
3600    return FindLayout(my_data, imgpair, layout);
3601}
3602
3603bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3604    auto sub_data = my_data->imageSubresourceMap.find(image);
3605    if (sub_data == my_data->imageSubresourceMap.end())
3606        return false;
3607    auto imgIt = my_data->imageMap.find(image);
3608    if (imgIt == my_data->imageMap.end())
3609        return false;
3610    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. For now it will just ignore
    // potential errors in this case.
3613    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3614        ignoreGlobal = true;
3615    }
3616    for (auto imgsubpair : sub_data->second) {
3617        if (ignoreGlobal && !imgsubpair.hasSubresource)
3618            continue;
3619        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3620        if (img_data != my_data->imageLayoutMap.end()) {
3621            layouts.push_back(img_data->second.layout);
3622        }
3623    }
3624    return true;
3625}
3626
3627// Set the layout on the global level
3628void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3629    VkImage &image = imgpair.image;
3630    // TODO (mlentine): Maybe set format if new? Not used atm.
3631    my_data->imageLayoutMap[imgpair].layout = layout;
3632    // TODO (mlentine): Maybe make vector a set?
3633    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3634    if (subresource == my_data->imageSubresourceMap[image].end()) {
3635        my_data->imageSubresourceMap[image].push_back(imgpair);
3636    }
3637}
3638
3639// Set the layout on the cmdbuf level
3640void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3641    pCB->imageLayoutMap[imgpair] = node;
3642    // TODO (mlentine): Maybe make vector a set?
3643    auto subresource =
3644        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3645    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3646        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3647    }
3648}
3649
3650void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3651    // TODO (mlentine): Maybe make vector a set?
3652    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3653        pCB->imageSubresourceMap[imgpair.image].end()) {
3654        pCB->imageLayoutMap[imgpair].layout = layout;
3655    } else {
3656        // TODO (mlentine): Could be expensive and might need to be removed.
3657        assert(imgpair.hasSubresource);
3658        IMAGE_CMD_BUF_LAYOUT_NODE node;
3659        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3660            node.initialLayout = layout;
3661        }
3662        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3663    }
3664}
3665
3666template <class OBJECT, class LAYOUT>
3667void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3668    if (imgpair.subresource.aspectMask & aspectMask) {
3669        imgpair.subresource.aspectMask = aspectMask;
3670        SetLayout(pObject, imgpair, layout);
3671    }
3672}
3673
3674template <class OBJECT, class LAYOUT>
3675void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3676    ImageSubresourcePair imgpair = {image, true, range};
3677    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3678    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3679    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3680    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3681}
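
// Example of the fan-out above (illustrative): a range with
// aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) records
// two entries, one keyed on the DEPTH aspect alone and one on the STENCIL aspect
// alone, so later per-aspect lookups can match exactly.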
3682
3683template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3684    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    // The pair already carries the image handle, so forward to the ImageSubresourcePair overload
    SetLayout(pObject, imgpair, layout);
3686}
3687
3688void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3689    auto image_view_data = dev_data->imageViewMap.find(imageView);
3690    assert(image_view_data != dev_data->imageViewMap.end());
3691    const VkImage &image = image_view_data->second.image;
3692    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3693    // TODO: Do not iterate over every possibility - consolidate where possible
3694    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3695        uint32_t level = subRange.baseMipLevel + j;
3696        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3697            uint32_t layer = subRange.baseArrayLayer + k;
3698            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3699            SetLayout(pCB, image, sub, layout);
3700        }
3701    }
3702}
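
// Worked example (illustrative): for a view created with baseMipLevel = 1,
// levelCount = 2, baseArrayLayer = 0, layerCount = 1, the loops above record
// layouts for the subresources {aspectMask, 1, 0} and {aspectMask, 2, 0}.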
3703
3704// Verify that given imageView is valid
3705static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3706    VkBool32 skipCall = VK_FALSE;
3707    auto ivIt = my_data->imageViewMap.find(*pImageView);
3708    if (ivIt == my_data->imageViewMap.end()) {
3709        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3710                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3711                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3712                            (uint64_t)*pImageView);
3713    } else {
3714        // Validate that imageLayout is compatible with aspectMask and image format
3715        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3716        VkImage image = ivIt->second.image;
3717        // TODO : Check here in case we have a bad image
3718        VkFormat format = VK_FORMAT_MAX_ENUM;
3719        auto imgIt = my_data->imageMap.find(image);
3720        if (imgIt != my_data->imageMap.end()) {
3721            format = (*imgIt).second.createInfo.format;
3722        } else {
3723            // Also need to check the swapchains.
3724            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3725            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3726                VkSwapchainKHR swapchain = swapchainIt->second;
3727                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3728                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3729                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3730                    format = pswapchain_node->createInfo.imageFormat;
3731                }
3732            }
3733        }
3734        if (format == VK_FORMAT_MAX_ENUM) {
3735            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3736                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3737                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3738                                " in imageView %#" PRIxLEAST64,
3739                                (uint64_t)image, (uint64_t)*pImageView);
3740        } else {
3741            VkBool32 ds = vk_format_is_depth_or_stencil(format);
3742            switch (imageLayout) {
3743            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3744                // Only Color bit must be set
3745                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3746                    skipCall |=
3747                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3748                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3749                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3750                                "and imageView %#" PRIxLEAST64 ""
3751                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3752                                (uint64_t)*pImageView);
3753                }
3754                // format must NOT be DS
3755                if (ds) {
3756                    skipCall |=
3757                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3758                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3759                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3760                                "and imageView %#" PRIxLEAST64 ""
3761                                " but the image format is %s which is not a color format.",
3762                                (uint64_t)*pImageView, string_VkFormat(format));
3763                }
3764                break;
3765            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3766            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3767                // Depth or stencil bit must be set, but both must NOT be set
3768                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3769                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
                        // both must NOT be set
3771                        skipCall |=
3772                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3773                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3774                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3775                                    " that has both STENCIL and DEPTH aspects set",
3776                                    (uint64_t)*pImageView);
3777                    }
3778                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3779                    // Neither were set
3780                    skipCall |=
3781                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3782                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3783                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3784                                " that does not have STENCIL or DEPTH aspect set.",
3785                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3786                }
3787                // format must be DS
3788                if (!ds) {
3789                    skipCall |=
3790                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3791                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3792                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3793                                " but the image format is %s which is not a depth/stencil format.",
3794                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3795                }
3796                break;
3797            default:
3798                // anything to check for other layouts?
3799                break;
3800            }
3801        }
3802    }
3803    return skipCall;
3804}
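
// Illustrative example of an update the checks above accept (hypothetical handles):
//   VkDescriptorImageInfo img = {sampler, depthView, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL};
// where depthView was created with aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT (DEPTH
// xor STENCIL) over an image with a true depth/stencil format such as VK_FORMAT_D32_SFLOAT.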
3805
3806// Verify that given bufferView is valid
3807static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3808    VkBool32 skipCall = VK_FALSE;
3809    auto sampIt = my_data->bufferViewMap.find(*pBufferView);
3810    if (sampIt == my_data->bufferViewMap.end()) {
3811        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3812                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3813                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3814                            (uint64_t)*pBufferView);
3815    } else {
3816        // TODO : Any further checks we want to do on the bufferView?
3817    }
3818    return skipCall;
3819}
3820
3821// Verify that given bufferInfo is valid
3822static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3823    VkBool32 skipCall = VK_FALSE;
3824    auto sampIt = my_data->bufferMap.find(pBufferInfo->buffer);
3825    if (sampIt == my_data->bufferMap.end()) {
3826        skipCall |=
3827            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3828                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3829                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3830                    (uint64_t)pBufferInfo->buffer);
3831    } else {
        // TODO : Any further checks we want to do on the buffer?
3833    }
3834    return skipCall;
3835}
3836
3837static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3838                                       const VkDescriptorSetLayoutBinding *pLayoutBinding) {
3839    VkBool32 skipCall = VK_FALSE;
3840    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3841    const VkSampler *pSampler = NULL;
3842    VkBool32 immutable = VK_FALSE;
3843    uint32_t i = 0;
3844    // For given update type, verify that update contents are correct
3845    switch (pWDS->descriptorType) {
3846    case VK_DESCRIPTOR_TYPE_SAMPLER:
3847        for (i = 0; i < pWDS->descriptorCount; ++i) {
3848            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3849        }
3850        break;
3851    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3852        for (i = 0; i < pWDS->descriptorCount; ++i) {
3853            if (NULL == pLayoutBinding->pImmutableSamplers) {
3854                pSampler = &(pWDS->pImageInfo[i].sampler);
3855                if (immutable) {
3856                    skipCall |= log_msg(
3857                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3858                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3859                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3860                        ", but previous update(s) from this "
3861                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3862                        "use immutable or non-immutable samplers.",
3863                        i, (uint64_t)*pSampler);
3864                }
3865            } else {
3866                if (i > 0 && !immutable) {
3867                    skipCall |= log_msg(
3868                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3869                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3870                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3871                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3872                        "use immutable or non-immutable samplers.",
3873                        i);
3874                }
3875                immutable = VK_TRUE;
3876                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
3877            }
3878            skipCall |= validateSampler(my_data, pSampler, immutable);
3879        }
3880    // Intentionally fall through here to also validate image stuff
3881    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3882    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3883    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3884        for (i = 0; i < pWDS->descriptorCount; ++i) {
3885            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3886        }
3887        break;
3888    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3889    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3890        for (i = 0; i < pWDS->descriptorCount; ++i) {
3891            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3892        }
3893        break;
3894    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3895    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3896    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3897    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3898        for (i = 0; i < pWDS->descriptorCount; ++i) {
3899            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3900        }
3901        break;
3902    default:
3903        break;
3904    }
3905    return skipCall;
3906}
// Verify that the given set exists and is not being used by an in-flight CmdBuffer
3908// func_str is the name of the calling function
3909// Return VK_FALSE if no errors occur
3910// Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
3911VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3912    VkBool32 skip_call = VK_FALSE;
3913    auto set_node = my_data->setMap.find(set);
3914    if (set_node == my_data->setMap.end()) {
3915        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3916                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3917                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3918                             (uint64_t)(set));
3919    } else {
3920        if (set_node->second->in_use.load()) {
3921            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3922                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3923                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3924                                 func_str.c_str(), (uint64_t)(set));
3925        }
3926    }
3927    return skip_call;
3928}
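
// Illustrative timeline for the in-use check above (hypothetical handles; assumes
// in_use is set while the bound command buffer is in flight):
//   vkCmdBindDescriptorSets(cb, ..., 1, &set, 0, NULL); // set is now bound to cb
//   vkQueueSubmit(queue, 1, &submitInfo, fence);        // cb (and set) now in flight
//   vkFreeDescriptorSets(device, pool, 1, &set);        // -> DRAWSTATE_OBJECT_INUSE above
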
3929static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
3930    // Flag any CBs this set is bound to as INVALID
3931    for (auto cb : pSet->boundCmdBuffers) {
3932        auto cb_node = dev_data->commandBufferMap.find(cb);
3933        if (cb_node != dev_data->commandBufferMap.end()) {
3934            cb_node->second->state = CB_INVALID;
3935        }
3936    }
3937}
3938// update DS mappings based on write and copy update arrays
3939static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
3940                         uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
3941    VkBool32 skipCall = VK_FALSE;
3942
3943    LAYOUT_NODE *pLayout = NULL;
3944    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
3945    // Validate Write updates
3946    uint32_t i = 0;
3947    for (i = 0; i < descriptorWriteCount; i++) {
        VkDescriptorSet ds = pWDS[i].dstSet;
        SET_NODE *pSet = my_data->setMap[ds];
        // Set being updated cannot be in-flight
        if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == VK_TRUE)
            return skipCall;
        if (!pSet) {
            // validateIdleDescriptorSet() has already flagged the unknown set; skip this update
            continue;
        }
        // If set is bound to any cmdBuffers, mark them invalid
        invalidateBoundCmdBuffers(my_data, pSet);
3955        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
3956        pLayout = pSet->pLayout;
3957        // First verify valid update struct
3958        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
3959            break;
3960        }
3961        uint32_t binding = 0, endIndex = 0;
3962        binding = pWDS[i].dstBinding;
3963        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
3964        // Make sure that layout being updated has the binding being updated
3965        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
3966            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3967                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3968                                "Descriptor Set %" PRIu64 " does not have binding to match "
3969                                "update binding %u for update type "
3970                                "%s!",
3971                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
3972        } else {
3973            // Next verify that update falls within size of given binding
3974            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
3975            if (getBindingEndIndex(pLayout, binding) < endIndex) {
3976                pLayoutCI = &pLayout->createInfo;
3977                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
3978                skipCall |=
3979                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3980                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3981                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
3982                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
3983            } else { // TODO : should we skip update on a type mismatch or force it?
3984                uint32_t startIndex;
3985                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
3986                // Layout bindings match w/ update, now verify that update type
3987                // & stageFlags are the same for entire update
3988                if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
3989                    // The update is within bounds and consistent, but need to
3990                    // make sure contents make sense as well
3991                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
3992                                                           &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
3993                        // Update is good. Save the update info
3994                        // Create new update struct for this set's shadow copy
3995                        GENERIC_HEADER *pNewNode = NULL;
3996                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
3997                        if (NULL == pNewNode) {
3998                            skipCall |= log_msg(
3999                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4000                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4001                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
4002                        } else {
4003                            // Insert shadow node into LL of updates for this set
4004                            pNewNode->pNext = pSet->pUpdateStructs;
4005                            pSet->pUpdateStructs = pNewNode;
4006                            // Now update appropriate descriptor(s) to point to new Update node
4007                            for (uint32_t j = startIndex; j <= endIndex; j++) {
4008                                assert(j < pSet->descriptorCount);
4009                                pSet->pDescriptorUpdates[j] = pNewNode;
4010                            }
4011                        }
4012                    }
4013                }
4014            }
4015        }
4016    }
4017    // Now validate copy updates
4018    for (i = 0; i < descriptorCopyCount; ++i) {
4019        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
4020        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
4021        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
4022        // For each copy make sure that update falls within given layout and that types match
        pSrcSet = my_data->setMap[pCDS[i].srcSet];
        pDstSet = my_data->setMap[pCDS[i].dstSet];
        if (!pSrcSet || !pDstSet) {
            // TODO : Flag an error for the unknown set(s); skip to avoid dereferencing NULL below
            continue;
        }
        // Set being updated cannot be in-flight
        if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == VK_TRUE)
4027            return skipCall;
4028        invalidateBoundCmdBuffers(my_data, pDstSet);
4029        pSrcLayout = pSrcSet->pLayout;
4030        pDstLayout = pDstSet->pLayout;
4031        // Validate that src binding is valid for src set layout
4032        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
4033            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4034                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4035                                "Copy descriptor update %u has srcBinding %u "
4036                                "which is out of bounds for underlying SetLayout "
4037                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4038                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
4039        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
4040            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4041                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4042                                "Copy descriptor update %u has dstBinding %u "
4043                                "which is out of bounds for underlying SetLayout "
4044                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4045                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
4046        } else {
4047            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
4048            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4049                                            (const GENERIC_HEADER *)&(pCDS[i]));
4050            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4051                                            (const GENERIC_HEADER *)&(pCDS[i]));
4052            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
4053                pLayoutCI = &pSrcLayout->createInfo;
4054                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4055                skipCall |=
4056                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4057                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4058                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4059                            pCDS[i].srcBinding, DSstr.c_str());
4060            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
4061                pLayoutCI = &pDstLayout->createInfo;
4062                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4063                skipCall |=
4064                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4065                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4066                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4067                            pCDS[i].dstBinding, DSstr.c_str());
4068            } else {
4069                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4070                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4071                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4072                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4073                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
4074                    // For copy just make sure that the types match and then perform the update
4075                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
4076                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4077                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
4078                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
4079                                            "that does not match overlapping dest descriptor type of %s!",
4080                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
4081                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
4082                    } else {
4083                        // point dst descriptor at corresponding src descriptor
4084                        // TODO : This may be a hole. I believe copy should be its own copy,
4085                        //  otherwise a subsequent write update to src will incorrectly affect the copy
4086                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
4087                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
4088                    }
4089                }
4090            }
4091        }
4092    }
4093    return skipCall;
4094}
4095
4096// Verify that given pool has descriptors that are being requested for allocation.
4097// NOTE : Calls to this function should be wrapped in mutex
4098static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
4099                                                         const VkDescriptorSetLayout *pSetLayouts) {
4100    VkBool32 skipCall = VK_FALSE;
4101    uint32_t i = 0;
4102    uint32_t j = 0;
4103
4104    // Track number of descriptorSets allowable in this pool
4105    if (pPoolNode->availableSets < count) {
4106        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4107                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4108                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
                            ". This pool only has %u descriptorSets remaining.",
4110                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
4111    } else {
4112        pPoolNode->availableSets -= count;
4113    }
4114
4115    for (i = 0; i < count; ++i) {
4116        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
4117        if (NULL == pLayout) {
4118            skipCall |=
4119                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
4120                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
4121                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
4122                        (uint64_t)pSetLayouts[i]);
4123        } else {
4124            uint32_t typeIndex = 0, poolSizeCount = 0;
4125            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
4126                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
4127                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
4128                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
4129                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4130                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
4131                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4132                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
                                        ". This pool only has %u descriptors of this type remaining.",
4134                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
4135                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
4136                } else { // Decrement available descriptors of this type
4137                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4138                }
4139            }
4140        }
4141    }
4142    return skipCall;
4143}
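
// Worked example (illustrative): a pool created with maxSets = 2 and a single
// VkDescriptorPoolSize of {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 4} can satisfy two
// allocations of a layout holding two uniform-buffer descriptors (consuming both
// sets and all 4 descriptors); a third allocation then fails both checks above.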
4144
4145// Free the shadowed update node for this Set
4146// NOTE : Calls to this function should be wrapped in mutex
4147static void freeShadowUpdateTree(SET_NODE *pSet) {
4148    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4149    pSet->pUpdateStructs = NULL;
4150    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4151    // Clear the descriptor mappings as they will now be invalid
4152    pSet->pDescriptorUpdates.clear();
4153    while (pShadowUpdate) {
4154        pFreeUpdate = pShadowUpdate;
4155        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4156        VkWriteDescriptorSet *pWDS = NULL;
4157        switch (pFreeUpdate->sType) {
4158        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4159            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4160            switch (pWDS->descriptorType) {
4161            case VK_DESCRIPTOR_TYPE_SAMPLER:
4162            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4163            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4164            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4165                delete[] pWDS->pImageInfo;
4166            } break;
4167            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4168            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4169                delete[] pWDS->pTexelBufferView;
4170            } break;
4171            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4172            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4173            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4174            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4175                delete[] pWDS->pBufferInfo;
4176            } break;
4177            default:
4178                break;
4179            }
4180            break;
4181        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4182            break;
4183        default:
4184            assert(0);
4185            break;
4186        }
4187        delete pFreeUpdate;
4188    }
4189}
4190
4191// Free all DS Pools including their Sets & related sub-structs
4192// NOTE : Calls to this function should be wrapped in mutex
4193static void deletePools(layer_data *my_data) {
4194    if (my_data->descriptorPoolMap.size() <= 0)
4195        return;
4196    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4197        SET_NODE *pSet = (*ii).second->pSets;
4198        SET_NODE *pFreeSet = pSet;
4199        while (pSet) {
4200            pFreeSet = pSet;
4201            pSet = pSet->pNext;
4202            // Freeing layouts handled in deleteLayouts() function
4203            // Free Update shadow struct tree
4204            freeShadowUpdateTree(pFreeSet);
4205            delete pFreeSet;
4206        }
4207        delete (*ii).second;
4208    }
4209    my_data->descriptorPoolMap.clear();
4210}
4211
4212// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
4213// NOTE : Calls to this function should be wrapped in mutex
4214static void deleteLayouts(layer_data *my_data) {
4215    if (my_data->descriptorSetLayoutMap.size() <= 0)
4216        return;
4217    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
4218        LAYOUT_NODE *pLayout = (*ii).second;
4219        if (pLayout->createInfo.pBindings) {
4220            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
4221                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
4222            }
4223            delete[] pLayout->createInfo.pBindings;
4224        }
4225        delete pLayout;
4226    }
4227    my_data->descriptorSetLayoutMap.clear();
4228}
4229
4230// Currently clearing a set is removing all previous updates to that set
4231//  TODO : Validate if this is correct clearing behavior
4232static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4233    SET_NODE *pSet = getSetNode(my_data, set);
4234    if (!pSet) {
4235        // TODO : Return error
4236    } else {
4237        freeShadowUpdateTree(pSet);
4238    }
4239}
4240
4241static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4242                                VkDescriptorPoolResetFlags flags) {
4243    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4244    if (!pPool) {
4245        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4246                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4247                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4248    } else {
4249        // TODO: validate flags
        // For each set allocated from this pool, clear it
4251        SET_NODE *pSet = pPool->pSets;
4252        while (pSet) {
4253            clearDescriptorSet(my_data, pSet->set);
4254            pSet = pSet->pNext;
4255        }
4256        // Reset available count to max count for this pool
4257        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4258            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4259        }
4260    }
4261}
4262
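// Illustrative sketch (guarded out of compilation): vkResetDescriptorPool()
// implicitly returns every set allocated from the pool, which is why the
// helper above clears each set's shadowed updates and restores the per-type
// available counts to their creation-time maximums.
#if 0
vkResetDescriptorPool(device, pool, 0); // flags are reserved for future use
// All VkDescriptorSet handles previously allocated from 'pool' are now invalid.
#endif
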
4263// For given CB object, fetch associated CB Node from map
4264static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4265    if (my_data->commandBufferMap.count(cb) == 0) {
4266        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4267                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4268                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
4269        return NULL;
4270    }
4271    return my_data->commandBufferMap[cb];
4272}
4273
4274// Free all CB Nodes
4275// NOTE : Calls to this function should be wrapped in mutex
4276static void deleteCommandBuffers(layer_data *my_data) {
4277    if (my_data->commandBufferMap.size() <= 0) {
4278        return;
4279    }
4280    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4281        delete (*ii).second;
4282    }
4283    my_data->commandBufferMap.clear();
4284}
4285
4286static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4287    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4288                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4289                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4290}
4291
4292VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4293    if (!pCB->activeRenderPass)
4294        return VK_FALSE;
4295    VkBool32 skip_call = VK_FALSE;
4296    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4297        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4298                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4299                             "Commands cannot be called in a subpass using secondary command buffers.");
4300    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4301        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4302                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4303                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4304    }
4305    return skip_call;
4306}
4307
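// Illustrative sketch (guarded out of compilation): the VkSubpassContents value
// passed at vkCmdBeginRenderPass()/vkCmdNextSubpass() time decides which side
// of the check above a command falls on. With SECONDARY_COMMAND_BUFFERS only
// vkCmdExecuteCommands() is legal inside the subpass; with INLINE it is the one
// command that is not.
#if 0
vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
vkCmdExecuteCommands(cmd_buf, 1, &secondary_cmd_buf); // OK
vkCmdDraw(cmd_buf, 3, 1, 0, 0);                       // flagged by this check
vkCmdEndRenderPass(cmd_buf);
#endif
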
4308static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4309    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4310        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4311                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4312                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4313    return false;
4314}
4315
4316static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4317    if (!(flags & VK_QUEUE_COMPUTE_BIT))
4318        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4319                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4320                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4321    return false;
4322}
4323
4324static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4325    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4326        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4327                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
4329    return false;
4330}
4331
4332// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4333//  in the recording state or if there's an issue with the Cmd ordering
4334static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4335    VkBool32 skipCall = VK_FALSE;
4336    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4337    if (pool_data != my_data->commandPoolMap.end()) {
4338        VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4339        switch (cmd) {
4340        case CMD_BINDPIPELINE:
4341        case CMD_BINDPIPELINEDELTA:
4342        case CMD_BINDDESCRIPTORSETS:
4343        case CMD_FILLBUFFER:
4344        case CMD_CLEARCOLORIMAGE:
4345        case CMD_SETEVENT:
4346        case CMD_RESETEVENT:
4347        case CMD_WAITEVENTS:
4348        case CMD_BEGINQUERY:
4349        case CMD_ENDQUERY:
4350        case CMD_RESETQUERYPOOL:
4351        case CMD_COPYQUERYPOOLRESULTS:
4352        case CMD_WRITETIMESTAMP:
4353            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4354            break;
4355        case CMD_SETVIEWPORTSTATE:
4356        case CMD_SETSCISSORSTATE:
4357        case CMD_SETLINEWIDTHSTATE:
4358        case CMD_SETDEPTHBIASSTATE:
4359        case CMD_SETBLENDSTATE:
4360        case CMD_SETDEPTHBOUNDSSTATE:
4361        case CMD_SETSTENCILREADMASKSTATE:
4362        case CMD_SETSTENCILWRITEMASKSTATE:
4363        case CMD_SETSTENCILREFERENCESTATE:
4364        case CMD_BINDINDEXBUFFER:
4365        case CMD_BINDVERTEXBUFFER:
4366        case CMD_DRAW:
4367        case CMD_DRAWINDEXED:
4368        case CMD_DRAWINDIRECT:
4369        case CMD_DRAWINDEXEDINDIRECT:
4370        case CMD_BLITIMAGE:
4371        case CMD_CLEARATTACHMENTS:
4372        case CMD_CLEARDEPTHSTENCILIMAGE:
4373        case CMD_RESOLVEIMAGE:
4374        case CMD_BEGINRENDERPASS:
4375        case CMD_NEXTSUBPASS:
4376        case CMD_ENDRENDERPASS:
4377            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4378            break;
4379        case CMD_DISPATCH:
4380        case CMD_DISPATCHINDIRECT:
4381            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4382            break;
4383        case CMD_COPYBUFFER:
4384        case CMD_COPYIMAGE:
4385        case CMD_COPYBUFFERTOIMAGE:
4386        case CMD_COPYIMAGETOBUFFER:
4387        case CMD_CLONEIMAGEDATA:
4388        case CMD_UPDATEBUFFER:
4389        case CMD_PIPELINEBARRIER:
4390        case CMD_EXECUTECOMMANDS:
4391            break;
4392        default:
4393            break;
4394        }
4395    }
    if (pCB->state != CB_RECORDING) {
        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        // init cmd node and append to end of cmd list
        CMD_NODE cmdNode = {};
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
4405    return skipCall;
4406}
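
// Illustrative sketch of the typical call pattern from a vkCmd* entry point in
// this layer (guarded out of compilation; handles and parameters here are
// hypothetical): addCmd() both records the command for later inspection and
// accumulates any queue-capability or recording-state errors.
#if 0
GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
VkBool32 skipCall = VK_FALSE;
if (pCB) {
    skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
}
if (VK_FALSE == skipCall)
    dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
#endif
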
4407// Reset the command buffer state
4408//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4409static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
4410    GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
4411    if (pCB) {
4412        pCB->cmds.clear();
4413        // Reset CB state (note that createInfo is not cleared)
4414        pCB->commandBuffer = cb;
4415        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4416        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4417        pCB->numCmds = 0;
4418        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4419        pCB->state = CB_NEW;
4420        pCB->submitCount = 0;
4421        pCB->status = 0;
4422        pCB->viewports.clear();
4423        pCB->scissors.clear();
4424        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4425            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
4426            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4427                auto set_node = my_data->setMap.find(set);
4428                if (set_node != my_data->setMap.end()) {
4429                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
4430                }
4431            }
4432            pCB->lastBound[i].reset();
4433        }
4434        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4435        pCB->activeRenderPass = 0;
4436        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4437        pCB->activeSubpass = 0;
4438        pCB->framebuffer = 0;
4439        pCB->fenceId = 0;
4440        pCB->lastSubmittedFence = VK_NULL_HANDLE;
4441        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
4442        pCB->destroyedSets.clear();
4443        pCB->updatedSets.clear();
4444        pCB->destroyedFramebuffers.clear();
4445        pCB->waitedEvents.clear();
4446        pCB->semaphores.clear();
4447        pCB->events.clear();
4448        pCB->waitedEventsBeforeQueryReset.clear();
4449        pCB->queryToStateMap.clear();
4450        pCB->activeQueries.clear();
4451        pCB->startedQueries.clear();
4452        pCB->imageLayoutMap.clear();
4453        pCB->eventToStageMap.clear();
4454        pCB->drawData.clear();
4455        pCB->currentDrawData.buffers.clear();
4456        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4457        pCB->secondaryCommandBuffers.clear();
4458        pCB->activeDescriptorSets.clear();
4459        pCB->validate_functions.clear();
4460        pCB->pMemObjList.clear();
4461        pCB->eventUpdates.clear();
4462    }
4463}
4464
4465// Set PSO-related status bits for CB, including dynamic state set via PSO
4466static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4467    // Account for any dynamic state not set via this PSO
4468    if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
4469        pCB->status = CBSTATUS_ALL;
4470    } else {
4471        // First consider all state on
4472        // Then unset any state that's noted as dynamic in PSO
4473        // Finally OR that into CB statemask
4474        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4475        for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
4476            switch (pPipe->dynStateCI.pDynamicStates[i]) {
4477            case VK_DYNAMIC_STATE_VIEWPORT:
4478                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4479                break;
4480            case VK_DYNAMIC_STATE_SCISSOR:
4481                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4482                break;
4483            case VK_DYNAMIC_STATE_LINE_WIDTH:
4484                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4485                break;
4486            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4487                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4488                break;
4489            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4490                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4491                break;
4492            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4493                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4494                break;
4495            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4496                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4497                break;
4498            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4499                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4500                break;
4501            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4502                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4503                break;
4504            default:
4505                // TODO : Flag error here
4506                break;
4507            }
4508        }
4509        pCB->status |= psoDynStateMask;
4510    }
4511}
4512
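// Worked example for the mask logic above: a pipeline whose only dynamic
// states are VIEWPORT and SCISSOR yields
//   psoDynStateMask = CBSTATUS_ALL & ~(CBSTATUS_VIEWPORT_SET | CBSTATUS_SCISSOR_SET);
//   pCB->status |= psoDynStateMask;
// so every status bit except viewport/scissor is considered set by the PSO,
// and those two remain unset until vkCmdSetViewport()/vkCmdSetScissor() are
// recorded.
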
4513// Print the last bound Gfx Pipeline
4514static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4515    VkBool32 skipCall = VK_FALSE;
4516    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4517    if (pCB) {
4518        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
4519        if (!pPipeTrav) {
4520            // nothing to print
4521        } else {
4522            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4523                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
4524                                vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
4525        }
4526    }
4527    return skipCall;
4528}
4529
4530static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4531    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4532    if (pCB && pCB->cmds.size() > 0) {
4533        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4534                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
        const vector<CMD_NODE> &cmds = pCB->cmds; // avoid copying the command list just to print it
4536        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4537            // TODO : Need to pass cb as srcObj here
4538            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4539                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4540        }
4541    } else {
4542        // Nothing to print
4543    }
4544}
4545
4546static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4547    VkBool32 skipCall = VK_FALSE;
4548    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4549        return skipCall;
4550    }
4551    skipCall |= printPipeline(my_data, cb);
4552    return skipCall;
4553}
4554
4555// Flags validation error if the associated call is made inside a render pass. The apiName
4556// routine should ONLY be called outside a render pass.
4557static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4558    VkBool32 inside = VK_FALSE;
4559    if (pCB->activeRenderPass) {
4560        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4561                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4562                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4563                         (uint64_t)pCB->activeRenderPass);
4564    }
4565    return inside;
4566}
4567
4568// Flags validation error if the associated call is made outside a render pass. The apiName
4569// routine should ONLY be called inside a render pass.
4570static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4571    VkBool32 outside = VK_FALSE;
4572    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4573        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4574         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4575        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4576                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4577                          "%s: This call must be issued inside an active render pass.", apiName);
4578    }
4579    return outside;
4580}
4581
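// Usage note (descriptive): transfer-style commands such as vkCmdCopyBuffer()
// are validated with insideRenderPass() (error if a render pass is active),
// while draw commands are validated with outsideRenderPass() (error if no
// render pass is active).
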
4582static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
4583
4584    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");
4585
4586    if (!globalLockInitialized) {
4587        loader_platform_thread_create_mutex(&globalLock);
4588        globalLockInitialized = 1;
4589    }
4590#if MTMERGESOURCE
4591    // Zero out memory property data
4592    memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
4593#endif
4594}
4595
4596VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4597vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4598    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4599
4600    assert(chain_info->u.pLayerInfo);
4601    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4602    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4603    if (fpCreateInstance == NULL)
4604        return VK_ERROR_INITIALIZATION_FAILED;
4605
4606    // Advance the link info for the next element on the chain
4607    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4608
4609    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4610    if (result != VK_SUCCESS)
4611        return result;
4612
4613    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4614    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4615    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
4616
4617    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
4618                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4619
4620    init_core_validation(my_data, pAllocator);
4621
4622    ValidateLayerOrdering(*pCreateInfo);
4623
4624    return result;
4625}
4626
4627/* hook DestroyInstance to remove tableInstanceMap entry */
4628VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4629    // TODOSC : Shouldn't need any customization here
4630    dispatch_key key = get_dispatch_key(instance);
4631    // TBD: Need any locking this early, in case this function is called at the
4632    // same time by more than one thread?
4633    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4634    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4635    pTable->DestroyInstance(instance, pAllocator);
4636
4637    loader_platform_thread_lock_mutex(&globalLock);
4638    // Clean up logging callback, if any
4639    while (my_data->logging_callback.size() > 0) {
4640        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4641        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4642        my_data->logging_callback.pop_back();
4643    }
4644
4645    layer_debug_report_destroy_instance(my_data->report_data);
4646    delete my_data->instance_dispatch_table;
4647    layer_data_map.erase(key);
4648    loader_platform_thread_unlock_mutex(&globalLock);
4649    if (layer_data_map.empty()) {
4650        // Release mutex when destroying last instance.
4651        loader_platform_thread_delete_mutex(&globalLock);
4652        globalLockInitialized = 0;
4653    }
4654}
4655
4656static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4657    uint32_t i;
4658    // TBD: Need any locking, in case this function is called at the same time
4659    // by more than one thread?
4660    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4661    dev_data->device_extensions.wsi_enabled = false;
4662
4663    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4664    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4665    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4666    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4667    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4668    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4669    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4670
4671    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4672        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4673            dev_data->device_extensions.wsi_enabled = true;
4674    }
4675}
4676
4677VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4678                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4679    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4680
4681    assert(chain_info->u.pLayerInfo);
4682    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4683    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4684    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
4685    if (fpCreateDevice == NULL) {
4686        return VK_ERROR_INITIALIZATION_FAILED;
4687    }
4688
4689    // Advance the link info for the next element on the chain
4690    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4691
4692    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4693    if (result != VK_SUCCESS) {
4694        return result;
4695    }
4696
4697    loader_platform_thread_lock_mutex(&globalLock);
4698    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4699    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4700
4701    // Setup device dispatch table
4702    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4703    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4704    my_device_data->device = *pDevice;
4705
4706    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4707    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4708    // Get physical device limits for this device
4709    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
4710    uint32_t count;
4711    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4712    my_device_data->physDevProperties.queue_family_properties.resize(count);
4713    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4714        gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
4715    // TODO: device limits should make sure these are compatible
4716    if (pCreateInfo->pEnabledFeatures) {
4717        my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
4718    } else {
4719        memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4720    }
4721    loader_platform_thread_unlock_mutex(&globalLock);
4722
4723    ValidateLayerOrdering(*pCreateInfo);
4724
4725    return result;
4726}
4727
4728// prototype
4729static void deleteRenderPasses(layer_data *);
4730VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4731    // TODOSC : Shouldn't need any customization here
4732    dispatch_key key = get_dispatch_key(device);
4733    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4734    // Free all the memory
4735    loader_platform_thread_lock_mutex(&globalLock);
4736    deletePipelines(dev_data);
4737    deleteRenderPasses(dev_data);
4738    deleteCommandBuffers(dev_data);
4739    deletePools(dev_data);
4740    deleteLayouts(dev_data);
4741    dev_data->imageViewMap.clear();
4742    dev_data->imageMap.clear();
4743    dev_data->imageSubresourceMap.clear();
4744    dev_data->imageLayoutMap.clear();
4745    dev_data->bufferViewMap.clear();
4746    dev_data->bufferMap.clear();
4747    loader_platform_thread_unlock_mutex(&globalLock);
4748#if MTMERGESOURCE
4749    VkBool32 skipCall = VK_FALSE;
4750    loader_platform_thread_lock_mutex(&globalLock);
4751    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4752            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4753    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4754            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4755    print_mem_list(dev_data, device);
4756    printCBList(dev_data, device);
4757    delete_cmd_buf_info_list(dev_data);
4758    // Report any memory leaks
4759    DEVICE_MEM_INFO *pInfo = NULL;
4760    if (dev_data->memObjMap.size() > 0) {
4761        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4762            pInfo = &(*ii).second;
4763            if (pInfo->allocInfo.allocationSize != 0) {
4764                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4765                skipCall |=
4766                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4767                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4768                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
4769                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
4770                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4771            }
4772        }
4773    }
4774    // Queues persist until device is destroyed
4775    delete_queue_info_list(dev_data);
4776    layer_debug_report_destroy_device(device);
4777    loader_platform_thread_unlock_mutex(&globalLock);
4778
4779#if DISPATCH_MAP_DEBUG
4780    fprintf(stderr, "Device: %p, key: %p\n", device, key);
4781#endif
4782    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4783    if (VK_FALSE == skipCall) {
4784        pDisp->DestroyDevice(device, pAllocator);
4785    }
4786#else
4787    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4788#endif
4789    delete dev_data->device_dispatch_table;
4790    layer_data_map.erase(key);
4791}
4792
4793#if MTMERGESOURCE
4794VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
4795vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
4796    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
4797    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
4798    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
4799    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
4800}
4801#endif
4802
4803static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4804
4805VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4806vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
4807    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
4808}
4809
4810VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4811vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
4812    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
4813}
4814
4815// TODO: Why does this exist - can we just use global?
4816static const VkLayerProperties cv_device_layers[] = {{
4817    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
4818}};
4819
4820VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
4821                                                                                    const char *pLayerName, uint32_t *pCount,
4822                                                                                    VkExtensionProperties *pProperties) {
4823    if (pLayerName == NULL) {
4824        dispatch_key key = get_dispatch_key(physicalDevice);
4825        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4826        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
4827    } else {
4828        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
4829    }
4830}
4831
4832VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4833vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
4834    /* draw_state physical device layers are the same as global */
4835    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
4836}
4837
// Validate that the initial layout specified in the command buffer for the
// IMAGE matches the globally tracked IMAGE layout
4841VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
4842    VkBool32 skip_call = VK_FALSE;
4843    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
4844    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
4845    for (auto cb_image_data : pCB->imageLayoutMap) {
4846        VkImageLayout imageLayout;
4847        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4848            skip_call |=
4849                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4850                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
4851                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4852        } else {
4853            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4854                // TODO: Set memory invalid which is in mem_tracker currently
4855            } else if (imageLayout != cb_image_data.second.initialLayout) {
4856                skip_call |=
4857                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4858                            reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4859                            "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when "
4860                            "first use is %s.",
4861                            reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4862                            string_VkImageLayout(cb_image_data.second.initialLayout));
4863            }
4864            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4865        }
4866    }
4867    return skip_call;
4868}
4869
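// Illustrative mismatch (hypothetical scenario): a command buffer recorded
// assuming an image is in TRANSFER_DST_OPTIMAL, submitted after another
// submission left the globally tracked layout at SHADER_READ_ONLY_OPTIMAL,
// produces the DRAWSTATE_INVALID_IMAGE_LAYOUT message above. Recording a
// vkCmdPipelineBarrier() with a matching VkImageMemoryBarrier before the
// image's first use is the usual fix.
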
4870// Track which resources are in-flight by atomically incrementing their "in_use" count
4871VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4872    VkBool32 skip_call = VK_FALSE;
4873    for (auto drawDataElement : pCB->drawData) {
4874        for (auto buffer : drawDataElement.buffers) {
4875            auto buffer_data = my_data->bufferMap.find(buffer);
4876            if (buffer_data == my_data->bufferMap.end()) {
4877                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4878                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4879                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
4880            } else {
4881                buffer_data->second.in_use.fetch_add(1);
4882            }
4883        }
4884    }
4885    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4886        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4887            auto setNode = my_data->setMap.find(set);
4888            if (setNode == my_data->setMap.end()) {
4889                skip_call |=
4890                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4891                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4892                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
4893            } else {
4894                setNode->second->in_use.fetch_add(1);
4895            }
4896        }
4897    }
4898    for (auto semaphore : pCB->semaphores) {
4899        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4900        if (semaphoreNode == my_data->semaphoreMap.end()) {
4901            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4903                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4904                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
4905        } else {
4906            semaphoreNode->second.in_use.fetch_add(1);
4907        }
4908    }
4909    for (auto event : pCB->events) {
4910        auto eventNode = my_data->eventMap.find(event);
4911        if (eventNode == my_data->eventMap.end()) {
4912            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4914                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4915                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
4916        } else {
4917            eventNode->second.in_use.fetch_add(1);
4918        }
4919    }
4920    return skip_call;
4921}
4922
4923void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4924    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4925    for (auto drawDataElement : pCB->drawData) {
4926        for (auto buffer : drawDataElement.buffers) {
4927            auto buffer_data = my_data->bufferMap.find(buffer);
4928            if (buffer_data != my_data->bufferMap.end()) {
4929                buffer_data->second.in_use.fetch_sub(1);
4930            }
4931        }
4932    }
4933    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4934        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4935            auto setNode = my_data->setMap.find(set);
4936            if (setNode != my_data->setMap.end()) {
4937                setNode->second->in_use.fetch_sub(1);
4938            }
4939        }
4940    }
4941    for (auto semaphore : pCB->semaphores) {
4942        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4943        if (semaphoreNode != my_data->semaphoreMap.end()) {
4944            semaphoreNode->second.in_use.fetch_sub(1);
4945        }
4946    }
4947    for (auto event : pCB->events) {
4948        auto eventNode = my_data->eventMap.find(event);
4949        if (eventNode != my_data->eventMap.end()) {
4950            eventNode->second.in_use.fetch_sub(1);
4951        }
4952    }
4953    for (auto queryStatePair : pCB->queryToStateMap) {
4954        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4955    }
4956    for (auto eventStagePair : pCB->eventToStageMap) {
4957        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4958    }
4959}
4960
4961void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4962    for (uint32_t i = 0; i < fenceCount; ++i) {
4963        auto fence_data = my_data->fenceMap.find(pFences[i]);
4964        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4965            return;
4966        fence_data->second.needsSignaled = false;
4967        fence_data->second.in_use.fetch_sub(1);
4968        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
4969        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
4970            decrementResources(my_data, cmdBuffer);
4971        }
4972    }
4973}
4974
4975void decrementResources(layer_data *my_data, VkQueue queue) {
4976    auto queue_data = my_data->queueMap.find(queue);
4977    if (queue_data != my_data->queueMap.end()) {
4978        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
4979            decrementResources(my_data, cmdBuffer);
4980        }
4981        queue_data->second.untrackedCmdBuffers.clear();
4982        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
4983    }
4984}
4985
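// Descriptive note on the in_use counters: validateAndIncrementResources()
// bumps each buffer, descriptor set, semaphore, and event referenced by a CB
// at submit time; once the associated fence is known to have signaled, the
// decrementResources() overloads above walk the fence's command buffers (and
// any prior fences) and release the same references, so a nonzero count means
// the resource is still in flight on the GPU.
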
4986void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
4987    if (queue == other_queue) {
4988        return;
4989    }
4990    auto queue_data = dev_data->queueMap.find(queue);
4991    auto other_queue_data = dev_data->queueMap.find(other_queue);
4992    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
4993        return;
4994    }
    // Use a distinct name so the loop variable does not shadow the fence parameter
    for (auto other_fence : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(other_fence);
    }
4998    if (fence != VK_NULL_HANDLE) {
4999        auto fence_data = dev_data->fenceMap.find(fence);
5000        if (fence_data == dev_data->fenceMap.end()) {
5001            return;
5002        }
5003        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5004            fence_data->second.cmdBuffers.push_back(cmdbuffer);
5005        }
5006        other_queue_data->second.untrackedCmdBuffers.clear();
5007    } else {
5008        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5009            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
5010        }
5011        other_queue_data->second.untrackedCmdBuffers.clear();
5012    }
5013    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
5014        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
5015    }
5016}
5017
5018void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5019    auto queue_data = my_data->queueMap.find(queue);
5020    if (fence != VK_NULL_HANDLE) {
5021        vector<VkFence> prior_fences;
5022        auto fence_data = my_data->fenceMap.find(fence);
5023        if (fence_data == my_data->fenceMap.end()) {
5024            return;
5025        }
5026        if (queue_data != my_data->queueMap.end()) {
5027            prior_fences = queue_data->second.lastFences;
5028            queue_data->second.lastFences.clear();
5029            queue_data->second.lastFences.push_back(fence);
5030            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
5031                fence_data->second.cmdBuffers.push_back(cmdbuffer);
5032            }
5033            queue_data->second.untrackedCmdBuffers.clear();
5034        }
5035        fence_data->second.cmdBuffers.clear();
5036        fence_data->second.priorFences = prior_fences;
5037        fence_data->second.needsSignaled = true;
5038        fence_data->second.queue = queue;
5039        fence_data->second.in_use.fetch_add(1);
5040        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5041            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5042            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5043                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5044                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
5045                }
5046                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
5047            }
5048        }
5049    } else {
5050        if (queue_data != my_data->queueMap.end()) {
5051            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5052                const VkSubmitInfo *submit = &pSubmits[submit_idx];
5053                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5054                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5055                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
5056                    }
5057                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
5058                }
5059            }
5060        }
5061    }
5062    if (queue_data != my_data->queueMap.end()) {
5063        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5064            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5065            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5066                // Add cmdBuffers to both the global set and queue set
5067                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5068                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
5069                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
5070                }
5071                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5072                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5073            }
5074        }
5075    }
5076}
5077
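// Descriptive summary of the tracking above: with a fence, the submit's
// primary and secondary CBs are attached to that fence (along with any CBs
// that were previously submitted without one); without a fence, they
// accumulate on the queue as untrackedCmdBuffers until a later fenced submit
// or a queue/device wait retires them. Either way, every CB is added to the
// global and per-queue in-flight sets.
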
5078bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5079    bool skip_call = false;
5080    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
5081        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5082        skip_call |=
5083            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5084                    __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Command Buffer %#" PRIx64 " is already in use and is not marked "
5085                                                             "for simultaneous use.",
5086                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
5087    }
5088    return skip_call;
5089}
5090
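// Illustrative sketch (guarded out of compilation): an application that needs
// to resubmit a command buffer while a prior submission may still be executing
// must opt in at begin time; that flag is what the check above keys on.
#if 0
VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
vkBeginCommandBuffer(cmd_buf, &begin_info);
#endif
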
5091static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5092    bool skipCall = false;
5093    // Validate that cmd buffers have been updated
5094    if (CB_RECORDED != pCB->state) {
5095        if (CB_INVALID == pCB->state) {
5096            // Inform app of reason CB invalid
5097            bool causeReported = false;
5098            if (!pCB->destroyedSets.empty()) {
5099                std::stringstream set_string;
5100                for (auto set : pCB->destroyedSets)
5101                    set_string << " " << set;
5102
5103                skipCall |=
5104                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5105                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5106                            "You are submitting command buffer %#" PRIxLEAST64
5107                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
5108                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5109                causeReported = true;
5110            }
5111            if (!pCB->updatedSets.empty()) {
5112                std::stringstream set_string;
5113                for (auto set : pCB->updatedSets)
5114                    set_string << " " << set;
5115
5116                skipCall |=
5117                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5118                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5119                            "You are submitting command buffer %#" PRIxLEAST64
5120                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
5121                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5122                causeReported = true;
5123            }
5124            if (!pCB->destroyedFramebuffers.empty()) {
5125                std::stringstream fb_string;
5126                for (auto fb : pCB->destroyedFramebuffers)
5127                    fb_string << " " << fb;
5128
5129                skipCall |=
5130                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5131                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5132                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
5133                            "referenced framebuffers destroyed: %s",
5134                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
5135                causeReported = true;
5136            }
5137            // TODO : This is defensive programming to make sure an error is
5138            //  flagged if we hit this INVALID cmd buffer case and none of the
5139            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
5141            if (!causeReported) {
5142                skipCall |= log_msg(
5143                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5144                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. "
                    "Validation should be improved to report the exact cause.",
5148                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
5149            }
5150        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
5151            skipCall |=
5152                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5153                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
5154                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
5155                        (uint64_t)(pCB->commandBuffer));
5156        }
5157    }
5158    return skipCall;
5159}
5160
5161static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5162    // Track in-use for resources off of primary and any secondary CBs
5163    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
5164    if (!pCB->secondaryCommandBuffers.empty()) {
5165        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5166            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5167            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5168            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5170                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5171                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5172                        " but that buffer has subsequently been bound to "
5173                        "primary cmd buffer %#" PRIxLEAST64 ".",
5174                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5175                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5176            }
5177        }
5178    }
5179    // TODO : Verify if this also needs to be checked for secondary command
5180    //  buffers. If so, this block of code can move to
5181    //   validateCommandBufferState() function. vulkan GL106 filed to clarify
5182    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5183        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5184                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5185                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
5186                            "set, but has been submitted %#" PRIxLEAST64 " times.",
5187                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5188    }
5189    skipCall |= validateCommandBufferState(dev_data, pCB);
5190    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5191    // on device
5192    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5193    return skipCall;
5194}
5195
5196VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5197vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5198    VkBool32 skipCall = VK_FALSE;
5199    GLOBAL_CB_NODE *pCBNode = NULL;
5200    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5201    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5202    loader_platform_thread_lock_mutex(&globalLock);
5203#if MTMERGESOURCE
5204    // TODO : Need to track fence and clear mem references when fence clears
5205    // MTMTODO : Merge this code with code below to avoid duplicating efforts
5206    uint64_t fenceId = 0;
5207    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);
5208
5209    print_mem_list(dev_data, queue);
5210    printCBList(dev_data, queue);
5211    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5212        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5213        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5214            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5215            if (pCBNode) {
5216                pCBNode->fenceId = fenceId;
5217                pCBNode->lastSubmittedFence = fence;
5218                pCBNode->lastSubmittedQueue = queue;
5219                for (auto &function : pCBNode->validate_functions) {
5220                    skipCall |= function();
5221                }
5222                for (auto &function : pCBNode->eventUpdates) {
5223                    skipCall |= static_cast<VkBool32>(function(queue));
5224                }
5225            }
5226        }
5227
5228        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5229            VkSemaphore sem = submit->pWaitSemaphores[i];
5230
5231            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5232                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
                    skipCall |=
5234                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5235                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
5236                                "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
5237                }
5238                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
5239            }
5240        }
5241        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
5242            VkSemaphore sem = submit->pSignalSemaphores[i];
5243
5244            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5245                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
5246                    skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5247                                       VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
5248                                       "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
5249                }
5250                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
5251            }
5252        }
5253    }
5254#endif
    // First verify that fence is not in use
    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
    }
    // Now verify each individual submit
    std::unordered_set<VkQueue> processed_other_queues;
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<VkSemaphore> semaphoreList;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
            semaphoreList.push_back(semaphore);
            if (dev_data->semaphoreMap[semaphore].signaled) {
                dev_data->semaphoreMap[semaphore].signaled = 0;
            } else {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
            }
            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
                processed_other_queues.insert(other_queue);
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
            semaphoreList.push_back(semaphore);
            if (dev_data->semaphoreMap[semaphore].signaled) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
            } else {
                dev_data->semaphoreMap[semaphore].signaled = 1;
                dev_data->semaphoreMap[semaphore].queue = queue;
            }
        }
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (pCBNode) {
                pCBNode->semaphores = semaphoreList;
                pCBNode->submitCount++; // increment submit count
                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
            }
        }
    }
    // Update cmdBuffer-related data structs and mark fence in-use
    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
            VkSemaphore sem = submit->pWaitSemaphores[i];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    return result;
}
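
// Illustrative (not part of the layer): the forward-progress rule validated
// above pairs every semaphore wait with a prior signal. A minimal sketch of
// a correct two-batch chain (names are hypothetical):
//
//     VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
//     VkSubmitInfo s0 = {};
//     s0.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     s0.signalSemaphoreCount = 1;
//     s0.pSignalSemaphores = &sem;   // batch 0 signals sem
//     VkSubmitInfo s1 = {};
//     s1.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     s1.waitSemaphoreCount = 1;
//     s1.pWaitSemaphores = &sem;     // batch 1 waits on the prior signal
//     s1.pWaitDstStageMask = &waitStage;
//     vkQueueSubmit(queue, 1, &s0, VK_NULL_HANDLE);
//     vkQueueSubmit(queue, 1, &s1, fence);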

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // TODO : Track allocations and overall size here
    // Only track the allocation if the driver actually created it
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
        print_mem_list(my_data, device);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
    // Before freeing a memory object, an application must ensure the memory object is no longer
    // in use by the device—for example by command buffers queued for execution. The memory need
    // not yet be unbound from all images and buffers, but any further use of those images or
    // buffers (on host or device) for anything other than destroying those objects will result in
    // undefined behavior.

    loader_platform_thread_lock_mutex(&globalLock);
    freeMemObjInfo(my_data, device, mem, VK_FALSE);
    print_mem_list(my_data, device);
    printCBList(my_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
}

VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    VkBool32 skipCall = VK_FALSE;

    if (size == 0) {
        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                           "vkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        // It is an application error to call vkMapMemory on an object that is already mapped
        if (mem_element->second.memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                               "vkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                   "MEM", "Mapping memory with VK_WHOLE_SIZE from offset %" PRIu64
                                          ", which is not less than the total memory size %" PRIu64,
                                   offset, mem_element->second.allocInfo.allocationSize);
            }
        } else {
            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                   "MEM", "Mapping memory from %" PRIu64 " to %" PRIu64 " with total memory size %" PRIu64, offset,
                                   size + offset, mem_element->second.allocInfo.allocationSize);
            }
        }
    }
    return skipCall;
}
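
// Illustrative (not part of the layer): a mapping that satisfies the range
// checks above. Either map the whole allocation, or keep offset + size within
// allocationSize:
//
//     void *pData = nullptr;
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &pData); // whole allocation
//     // ... write through pData ...
//     vkUnmapMemory(device, mem);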

void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        MemRange new_range;
        new_range.offset = offset;
        new_range.size = size;
        mem_element->second.memRange = new_range;
    }
}

VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        if (!mem_element->second.memRange.size) {
            // Valid Usage: memory must currently be mapped
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
        }
        mem_element->second.memRange.size = 0;
        if (mem_element->second.pData) {
            free(mem_element->second.pData);
            mem_element->second.pData = 0;
        }
    }
    return skipCall;
}
static const char NoncoherentMemoryFillValue = 0xb;

void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        mem_element->second.pDriverData = *ppData;
        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_element->second.pData = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_element->second.allocInfo.allocationSize;
            }
            size_t convSize = (size_t)(size);
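            // Shadow the mapping for non-coherent memory: allocate twice the
            // requested size, fill it with a recognizable pattern, and hand the
            // application a pointer offset to the middle. The half-size regions
            // before and after the app-visible window act as guard bands, so
            // out-of-bounds writes can later be detected as overwritten fill
            // bytes.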
            mem_element->second.pData = malloc(2 * convSize);
            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
        }
    }
}
#endif
// Note: This function assumes that the global lock is held by the calling
// thread.
VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    VkBool32 skip_call = VK_FALSE;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    if (pCB) {
        for (const auto &queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
            for (auto event : queryEventsPair.second) {
                if (my_data->eventMap[event].needsSignaled) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool %" PRIu64
                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                }
            }
        }
    }
    return skip_call;
}
// Remove given cmd_buffer from the global inFlight set.
//  Also, if given queue is valid, then remove the cmd_buffer from that queue's
//  inFlightCmdBuffer set. Finally, check all other queues and if given cmd_buffer
//  is still in flight on another queue, add it back into the global set.
// Note: This function assumes that the global lock is held by the calling
// thread.
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
        for (auto q : dev_data->queues) {
            if ((q != queue) &&
                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
                break;
            }
        }
    }
}
#if MTMERGESOURCE
static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    auto pFenceInfo = my_data->fenceMap.find(fence);
    if (pFenceInfo != my_data->fenceMap.end()) {
        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
            }
            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                    "acquire next image.",
                                    apiCall, reinterpret_cast<uint64_t &>(fence));
            }
        } else {
            pFenceInfo->second.firstTimeFlag = VK_FALSE;
        }
    }
    return skipCall;
}
#endif
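// Illustrative (not part of the layer): the submit-then-wait pattern the fence
// checks above and below expect. A fence should be submitted before its status
// is queried, and reset before reuse:
//
//     vkQueueSubmit(queue, 1, &submitInfo, fence);
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &fence); // return to unsignaled state for reuse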
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skip_call = VK_FALSE;
#if MTMERGESOURCE
    // Verify fence status of submitted fences
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
#endif
    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        // When we know that all fences are complete we can clean/remove their CBs
        if (waitAll || fenceCount == 1) {
            for (uint32_t i = 0; i < fenceCount; ++i) {
#if MTMERGESOURCE
                update_fence_tracking(dev_data, pFences[i]);
#endif
                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
                }
            }
            decrementResources(dev_data, fenceCount, pFences);
        }
        // NOTE : Alternate case not handled here is when some fences have completed. In
        //  this case for app to guarantee which fences completed it will have to call
        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return result;
#endif
    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    VkBool32 skip_call = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    if (result == VK_SUCCESS) {
#if MTMERGESOURCE
        update_fence_tracking(dev_data, fence);
#endif
        auto fence_queue = dev_data->fenceMap[fence].queue;
        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
        }
        decrementResources(dev_data, 1, &fence);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                                            VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    loader_platform_thread_lock_mutex(&globalLock);

    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
        pQNode->device = device;
#if MTMERGESOURCE
        pQNode->lastRetiredId = 0;
        pQNode->lastSubmittedId = 0;
#endif
    }

    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    decrementResources(dev_data, queue);
    VkBool32 skip_call = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    // Iterate over local set since we erase set members as we go in for loop
    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
    for (auto cmdBuffer : local_cb_set) {
        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
    }
    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
#if MTMERGESOURCE
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        retire_queue_fences(dev_data, queue);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    for (auto queue : dev_data->queues) {
        decrementResources(dev_data, queue);
        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
        }
    }
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
    }
    dev_data->globalInFlightCmdBuffers.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
#if MTMERGESOURCE
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        retire_device_fences(dev_data, device);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    if (dev_data->fenceMap[fence].in_use.load()) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
    }
#if MTMERGESOURCE
    delete_fence_info(dev_data, fence);
    auto item = dev_data->fenceMap.find(fence);
    if (item != dev_data->fenceMap.end()) {
        dev_data->fenceMap.erase(item);
    }
#endif
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->semaphoreMap.find(semaphore);
    if (item != dev_data->semaphoreMap.end()) {
        if (item->second.in_use.load()) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
        }
        dev_data->semaphoreMap.erase(semaphore);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    auto event_data = dev_data->eventMap.find(event);
    if (event_data != dev_data->eventMap.end()) {
        if (event_data->second.in_use.load()) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
        }
        dev_data->eventMap.erase(event_data);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skip_call)
        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                     VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
    GLOBAL_CB_NODE *pCB = nullptr;
    loader_platform_thread_lock_mutex(&globalLock);
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        pCB = getCBNode(dev_data, cmdBuffer);
        for (auto queryStatePair : pCB->queryToStateMap) {
            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
        }
    }
    VkBool32 skip_call = VK_FALSE;
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        // Available and in flight
        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
            queryToStateElement->second) {
            for (auto cmdBuffer : queryElement->second) {
                pCB = getCBNode(dev_data, cmdBuffer);
                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
                                         (uint64_t)(queryPool), firstQuery + i);
                } else {
                    for (auto event : queryEventElement->second) {
                        dev_data->eventMap[event].needsSignaled = true;
                    }
                }
            }
            // Unavailable and in flight
        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
                   !queryToStateElement->second) {
            // TODO : Can there be the same query in use by multiple command buffers in flight?
            bool make_available = false;
            for (auto cmdBuffer : queryElement->second) {
                pCB = getCBNode(dev_data, cmdBuffer);
                make_available |= pCB->queryToStateMap[query];
            }
            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
            // Unavailable
        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
                                 (uint64_t)(queryPool), firstQuery + i);
            // Uninitialized
        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool %" PRIu64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
                                                                flags);
}
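
// Illustrative (not part of the layer): retrieving results in a way that
// passes the checks above. VK_QUERY_RESULT_WAIT_BIT blocks until results are
// available, which also covers the "unavailable and in flight" case
// (kQueryCount is a hypothetical constant):
//
//     const uint32_t kQueryCount = 8;
//     uint64_t results[kQueryCount];
//     vkGetQueryPoolResults(device, queryPool, 0, kQueryCount, sizeof(results),
//                           results, sizeof(uint64_t),
//                           VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);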

VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    VkBool32 skip_call = VK_FALSE;
    auto buffer_data = my_data->bufferMap.find(buffer);
    if (buffer_data == my_data->bufferMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_data->second.in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
        }
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
    if (item != dev_data->bufferBindingMap.end()) {
        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
        dev_data->bufferBindingMap.erase(item);
    }
#endif
    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
        loader_platform_thread_unlock_mutex(&globalLock);
        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
        loader_platform_thread_lock_mutex(&globalLock);
    }
    dev_data->bufferMap.erase(buffer);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->bufferViewMap.find(bufferView);
    if (item != dev_data->bufferViewMap.end()) {
        dev_data->bufferViewMap.erase(item);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->imageBindingMap.find((uint64_t)image);
    if (item != dev_data->imageBindingMap.end()) {
        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        dev_data->imageBindingMap.erase(item);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);

    loader_platform_thread_lock_mutex(&globalLock);
    const auto &entry = dev_data->imageMap.find(image);
    if (entry != dev_data->imageMap.end()) {
        // Clear any memory mapping for this image
        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
        if (mem_entry != dev_data->memObjMap.end())
            mem_entry->second.image = VK_NULL_HANDLE;

        // Remove image from imageMap
        dev_data->imageMap.erase(entry);
    }
    const auto &subEntry = dev_data->imageSubresourceMap.find(image);
    if (subEntry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : subEntry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(subEntry);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
}
#if MTMERGESOURCE
VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
                                  VkDebugReportObjectTypeEXT object_type) {
    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
                       other_handle);
    } else {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
                       other_handle);
    }
}

VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
                               VkDebugReportObjectTypeEXT object_type) {
    VkBool32 skip_call = false;

    for (auto range : ranges) {
        if ((range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) <
            (new_range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
            continue;
        if ((range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) >
            (new_range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
            continue;
        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
    }
    return skip_call;
}
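
// Illustrative: with bufferImageGranularity = 0x400 (1 KiB), the masks above
// round both endpoints of each range down to a 1 KiB "page". Two resources are
// reported as aliased whenever any such page is shared, e.g. a buffer at
// [0x000, 0x4FF] and an image at [0x500, 0x8FF] both touch the page starting
// at 0x400 and would be flagged.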

VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
    MEMORY_RANGE range;
    range.handle = handle;
    range.memory = mem;
    range.start = memoryOffset;
    range.end = memoryOffset + memRequirements.size - 1;
    ranges.push_back(range);
    return validate_memory_range(dev_data, other_ranges, range, object_type);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
    // Track objects tied to memory
    uint64_t buffer_handle = (uint64_t)(buffer);
    VkBool32 skipCall =
        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
    {
        VkMemoryRequirements memRequirements;
        // Query requirements through the dispatch table rather than re-entering the call chain from the top
        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    }
    print_mem_list(dev_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}
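
// Illustrative (not part of the layer): the query-then-bind pattern whose
// aliasing is tracked above. Offsets should respect the reported alignment
// (desiredOffset is hypothetical):
//
//     VkMemoryRequirements reqs;
//     vkGetBufferMemoryRequirements(device, buffer, &reqs);
//     VkDeviceSize offset = (desiredOffset + reqs.alignment - 1) & ~(reqs.alignment - 1);
//     vkBindBufferMemory(device, buffer, mem, offset);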

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}
#endif
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);

    my_data->shaderModuleMap.erase(shaderModule);

    loader_platform_thread_unlock_mutex(&globalLock);

    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
#if MTMERGESOURCE
        clear_cmd_buf_and_mem_references(dev_data, pCommandBuffers[i]);
#endif
        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
        }
        // Delete CB information structure, and remove from commandBufferMap
        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
        if (cb != dev_data->commandBufferMap.end()) {
            // reset prior to delete for data clean-up
            resetCB(dev_data, (*cb).second->commandBuffer);
            delete (*cb).second;
            dev_data->commandBufferMap.erase(cb);
        }

        // Remove commandBuffer reference from commandPoolMap
        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
    }
#if MTMERGESOURCE
    printCBList(dev_data, device);
#endif
    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skip_call)
        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
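
// Illustrative (not part of the layer): command buffers must have completed
// execution before they are freed. Waiting on the queue (or on a fence passed
// to vkQueueSubmit) first avoids the in-use error reported above:
//
//     vkQueueWaitIdle(queue);
//     vkFreeCommandBuffers(device, commandPool, 1, &cb);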

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
    VkBool32 skipCall = VK_FALSE;
    auto pool_data = dev_data->commandPoolMap.find(commandPool);
    if (pool_data != dev_data->commandPoolMap.end()) {
        for (auto cmdBuffer : pool_data->second.commandBuffers) {
            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                            "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
            }
        }
    }
    return skipCall;
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool commandBufferComplete = false;
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // Verify that command buffers in pool are complete (not in-flight)
    // MTMTODO : Merge this with code below (separate *NotInUse() call)
    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
        commandBufferComplete = false;
        skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
        }
    }
#endif
    // Check for in-flight command buffers before tearing down the pool's state,
    // while its entries are still present in the tracking maps
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool)) {
        loader_platform_thread_unlock_mutex(&globalLock);
        return;
    }
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
            if (del_cb != dev_data->commandBufferMap.end()) {
                delete (*del_cb).second;                  // delete CB info structure
                dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
            }
            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
                poolCb); // Remove CB reference from commandPoolMap's list
        }
    }
    dev_data->commandPoolMap.erase(commandPool);

    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
    // Remove command buffers from command buffer map
    while (item != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
        auto del_item = item++;
        delete_cmd_buf_info(dev_data, commandPool, *del_item);
    }
    dev_data->commandPoolMap.erase(commandPool);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool commandBufferComplete = false;
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
#if MTMERGESOURCE
    // MTMTODO : Merge this with *NotInUse() call below
    loader_platform_thread_lock_mutex(&globalLock);
    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
    // Verify that CB's in pool are complete (not in-flight)
    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
        skipCall |= checkCBCompleted(dev_data, (*it), &commandBufferComplete);
        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Resetting CB %p before it has completed. You must check CB "
                                "completion flag before calling vkResetCommandPool().",
                                (*it));
        } else {
            // Clear memory references at this point.
            clear_cmd_buf_and_mem_references(dev_data, (*it));
        }
        ++it;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
            resetCB(dev_data, (*it));
            ++it;
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
6211
6212VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6213    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6214    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6215    bool skipCall = false;
6216    loader_platform_thread_lock_mutex(&globalLock);
6217    for (uint32_t i = 0; i < fenceCount; ++i) {
6218#if MTMERGESOURCE
6219        // Reset fence state in fenceCreateInfo structure
6220        // MTMTODO : Merge with code below
6221        auto fence_item = dev_data->fenceMap.find(pFences[i]);
6222        if (fence_item != dev_data->fenceMap.end()) {
6223            // Validate fences in SIGNALED state
6224            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
6225                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
6226                skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6227                                   (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
6228                                   "Fence %#" PRIxLEAST64 " submitted to VkResetFences in UNSIGNALED STATE", (uint64_t)pFences[i]);
6229            } else {
6230                fence_item->second.createInfo.flags =
6231                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
6232            }
6233        }
6234#endif
6235        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
6236            skipCall |=
6237                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6238                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6239                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
6240        }
6241    }
6242    loader_platform_thread_unlock_mutex(&globalLock);
6243    if (!skipCall)
6244        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6245    return result;
6246}
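
// Illustrative sketch (application-side usage, not layer code; handle names are
// hypothetical): vkResetFences expects fences that are signaled and not in flight:
//     VkFenceCreateInfo fenceInfo = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, VK_FENCE_CREATE_SIGNALED_BIT};
//     vkCreateFence(device, &fenceInfo, nullptr, &fence);
//     vkResetFences(device, 1, &fence); // valid: fence starts signaled and is not in use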
6247
6248VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6249vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6250    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6251    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
6252    if (fbNode != dev_data->frameBufferMap.end()) {
6253        for (auto cb : fbNode->second.referencingCmdBuffers) {
6254            auto cbNode = dev_data->commandBufferMap.find(cb);
6255            if (cbNode != dev_data->commandBufferMap.end()) {
6256                // Set CB as invalid and record destroyed framebuffer
6257                cbNode->second->state = CB_INVALID;
6258                loader_platform_thread_lock_mutex(&globalLock);
6259                cbNode->second->destroyedFramebuffers.insert(framebuffer);
6260                loader_platform_thread_unlock_mutex(&globalLock);
6261            }
6262        }
6263        loader_platform_thread_lock_mutex(&globalLock);
6264        dev_data->frameBufferMap.erase(framebuffer);
6265        loader_platform_thread_unlock_mutex(&globalLock);
6266    }
6267    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6268}
6269
6270VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6271vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6272    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6273    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6274    loader_platform_thread_lock_mutex(&globalLock);
6275    dev_data->renderPassMap.erase(renderPass);
6276    loader_platform_thread_unlock_mutex(&globalLock);
6277}
6278
6279VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6280                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6281    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6282
6283    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6284
6285    if (VK_SUCCESS == result) {
6286        loader_platform_thread_lock_mutex(&globalLock);
6287#if MTMERGESOURCE
6288        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
6289#endif
6290        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so fix that if/when we want that data to be valid
6291        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
6292        dev_data->bufferMap[*pBuffer].in_use.store(0);
6293        loader_platform_thread_unlock_mutex(&globalLock);
6294    }
6295    return result;
6296}
6297
6298VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6299                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6300    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6301    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6302    if (VK_SUCCESS == result) {
6303        loader_platform_thread_lock_mutex(&globalLock);
6304        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
6305#if MTMERGESOURCE
6306        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6307        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6308        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
6309                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
6310                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6311#endif
6312        loader_platform_thread_unlock_mutex(&globalLock);
6313    }
6314    return result;
6315}
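
// Illustrative sketch (application-side usage, not layer code): to pass the usage check
// above, the underlying buffer must be created with a texel-buffer usage bit, e.g.:
//     VkBufferCreateInfo bufInfo = {};
//     bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     bufInfo.size = 4096;
//     bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // or VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT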
6316
6317VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6318                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6319    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6320
6321    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6322
6323    if (VK_SUCCESS == result) {
6324        loader_platform_thread_lock_mutex(&globalLock);
6325#if MTMERGESOURCE
6326        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
6327#endif
6328        IMAGE_LAYOUT_NODE image_node;
6329        image_node.layout = pCreateInfo->initialLayout;
6330        image_node.format = pCreateInfo->format;
6331        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
6332        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6333        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6334        dev_data->imageLayoutMap[subpair] = image_node;
6335        loader_platform_thread_unlock_mutex(&globalLock);
6336    }
6337    return result;
6338}
6339
6340static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6341    /* expects globalLock to be held by caller */
6342
6343    auto image_node_it = dev_data->imageMap.find(image);
6344    if (image_node_it != dev_data->imageMap.end()) {
6345        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6346         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6347         * the actual values.
6348         */
6349        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6350            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
6351        }
6352
6353        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6354            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
6355        }
6356    }
6357}
6358
6359// Return the correct layer/level counts if the caller used the special
6360// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6361static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6362                                         VkImage image) {
6363    /* expects globalLock to be held by caller */
6364
6365    *levels = range.levelCount;
6366    *layers = range.layerCount;
6367    auto image_node_it = dev_data->imageMap.find(image);
6368    if (image_node_it != dev_data->imageMap.end()) {
6369        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6370            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
6371        }
6372        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6373            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
6374        }
6375    }
6376}
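
// Worked example of the resolution above: for an image created with mipLevels = 10 and
// arrayLayers = 6, a range of {baseMipLevel = 2, levelCount = VK_REMAINING_MIP_LEVELS,
// baseArrayLayer = 1, layerCount = VK_REMAINING_ARRAY_LAYERS} resolves to
// levelCount = 10 - 2 = 8 and layerCount = 6 - 1 = 5.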
6377
6378VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6379                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6380    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6381    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6382    if (VK_SUCCESS == result) {
6383        loader_platform_thread_lock_mutex(&globalLock);
6384        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
6385        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
6386        dev_data->imageViewMap[*pView] = localCI;
6387#if MTMERGESOURCE
6388        // Validate that img has correct usage flags set
6389        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
6390                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
6391                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6392                                   VK_FALSE, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
6393#endif
6394        loader_platform_thread_unlock_mutex(&globalLock);
6395    }
6396    return result;
6397}
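
// Illustrative sketch (application-side usage, not layer code): an image viewed through
// vkCreateImageView must carry at least one of the usage bits validated above, e.g.:
//     imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;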
6398
6399VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6400vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6401    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6402    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6403    if (VK_SUCCESS == result) {
6404        loader_platform_thread_lock_mutex(&globalLock);
6405        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
6406#if MTMERGESOURCE
6407        memset(pFN, 0, sizeof(MT_FENCE_INFO));
6408        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
6409        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
6410            pFN->firstTimeFlag = VK_TRUE;
6411        }
6412#endif
6413        pFN->in_use.store(0);
6414        loader_platform_thread_unlock_mutex(&globalLock);
6415    }
6416    return result;
6417}
6418
6419// TODO handle pipeline caches
6420VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6421                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6422    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6423    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6424    return result;
6425}
6426
6427VKAPI_ATTR void VKAPI_CALL
6428vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6429    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6430    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6431}
6432
6433VKAPI_ATTR VkResult VKAPI_CALL
6434vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6435    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6436    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6437    return result;
6438}
6439
6440VKAPI_ATTR VkResult VKAPI_CALL
6441vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6442    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6443    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6444    return result;
6445}
6446
6447// utility function to set collective state for pipeline
6448void set_pipeline_state(PIPELINE_NODE *pPipe) {
6449    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6450    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6451        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6452            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6453                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6454                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6455                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6456                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6457                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6458                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6459                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6460                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6461                    pPipe->blendConstantsEnabled = true;
6462                }
6463            }
6464        }
6465    }
6466}
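
// Note on the range check above: VK_BLEND_FACTOR_CONSTANT_COLOR through
// VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA are exactly the four blend factors that read
// the blend-constants state, so blendConstantsEnabled marks pipelines that need blend
// constants supplied (statically in pColorBlendState or via vkCmdSetBlendConstants())
// before drawing.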
6467
6468VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6469vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6470                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6471                          VkPipeline *pPipelines) {
6472    VkResult result = VK_SUCCESS;
6473    // TODO What to do with pipelineCache?
6474    // The order of operations here is a little convoluted but gets the job done
6475    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6476    //  2. Create state is then validated (which uses flags setup during shadowing)
6477    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6478    VkBool32 skipCall = VK_FALSE;
6479    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6480    vector<PIPELINE_NODE *> pPipeNode(count);
6481    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6482
6483    uint32_t i = 0;
6484    loader_platform_thread_lock_mutex(&globalLock);
6485
6486    for (i = 0; i < count; i++) {
6487        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
6488        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6489    }
6490
6491    if (VK_FALSE == skipCall) {
6492        loader_platform_thread_unlock_mutex(&globalLock);
6493        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6494                                                                          pPipelines);
6495        loader_platform_thread_lock_mutex(&globalLock);
6496        for (i = 0; i < count; i++) {
6497            pPipeNode[i]->pipeline = pPipelines[i];
6498            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6499        }
6500        loader_platform_thread_unlock_mutex(&globalLock);
6501    } else {
6502        for (i = 0; i < count; i++) {
6503            delete pPipeNode[i];
6504        }
6505        loader_platform_thread_unlock_mutex(&globalLock);
6506        return VK_ERROR_VALIDATION_FAILED_EXT;
6507    }
6508    return result;
6509}
6510
6511VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6512vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6513                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6514                         VkPipeline *pPipelines) {
6515    VkResult result = VK_SUCCESS;
6516    VkBool32 skipCall = VK_FALSE;
6517
6518    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6519    vector<PIPELINE_NODE *> pPipeNode(count);
6520    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6521
6522    uint32_t i = 0;
6523    loader_platform_thread_lock_mutex(&globalLock);
6524    for (i = 0; i < count; i++) {
6525        // TODO: Verify compute stage bits
6526
6527        // Create and initialize internal tracking data structure
6528        pPipeNode[i] = new PIPELINE_NODE;
6529        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6530
6531        // TODO: Add Compute Pipeline Verification
6532        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6533    }
6534
6535    if (VK_FALSE == skipCall) {
6536        loader_platform_thread_unlock_mutex(&globalLock);
6537        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6538                                                                         pPipelines);
6539        loader_platform_thread_lock_mutex(&globalLock);
6540        for (i = 0; i < count; i++) {
6541            pPipeNode[i]->pipeline = pPipelines[i];
6542            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6543        }
6544        loader_platform_thread_unlock_mutex(&globalLock);
6545    } else {
6546        for (i = 0; i < count; i++) {
6547            // Clean up any locally allocated data structures
6548            delete pPipeNode[i];
6549        }
6550        loader_platform_thread_unlock_mutex(&globalLock);
6551        return VK_ERROR_VALIDATION_FAILED_EXT;
6552    }
6553    return result;
6554}
6555
6556VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6557                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6558    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6559    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6560    if (VK_SUCCESS == result) {
6561        loader_platform_thread_lock_mutex(&globalLock);
6562        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6563        loader_platform_thread_unlock_mutex(&globalLock);
6564    }
6565    return result;
6566}
6567
6568VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6569vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6570                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6571    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6572    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6573    if (VK_SUCCESS == result) {
6574        // TODOSC : Capture layout bindings set
6575        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
6576        if (NULL == pNewNode) {
6577            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
6578                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6579                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
6580                return VK_ERROR_VALIDATION_FAILED_EXT;
6581        }
6582        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
6583        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
6584        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
6585               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
6586        // g++ does not like reserve with size 0
6587        if (pCreateInfo->bindingCount)
6588            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
6589        uint32_t totalCount = 0;
6590        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6591            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
6592                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6593                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
6594                            DRAWSTATE_INVALID_LAYOUT, "DS", "duplicate binding number used in "
6595                                                            "VkDescriptorSetLayoutBinding"))
6596                    return VK_ERROR_VALIDATION_FAILED_EXT;
6597            } else {
6598                pNewNode->bindingToIndexMap[pCreateInfo->pBindings[i].binding] = i; // Redundant: the emplace above already recorded this mapping
6599            }
6600            totalCount += pCreateInfo->pBindings[i].descriptorCount;
6601            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
6602                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
6603                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
6604                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
6605                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
6606            }
6607        }
6608        pNewNode->layout = *pSetLayout;
6609        pNewNode->startIndex = 0;
6610        if (totalCount > 0) {
6611            pNewNode->descriptorTypes.resize(totalCount);
6612            pNewNode->stageFlags.resize(totalCount);
6613            uint32_t offset = 0;
6614            uint32_t j = 0;
6615            VkDescriptorType dType;
6616            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6617                dType = pCreateInfo->pBindings[i].descriptorType;
6618                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
6619                    pNewNode->descriptorTypes[offset + j] = dType;
6620                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
6621                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
6622                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
6623                        pNewNode->dynamicDescriptorCount++;
6624                    }
6625                }
6626                offset += j;
6627            }
6628            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
6629        } else { // no descriptors
6630            pNewNode->endIndex = 0;
6631        }
6632        // Add the new node to the global descriptor set layout map
6633        loader_platform_thread_lock_mutex(&globalLock);
6634        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
6635        loader_platform_thread_unlock_mutex(&globalLock);
6636    }
6637    return result;
6638}
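
// Worked example of the flattened index layout built above: a layout with binding 0
// (descriptorCount = 3) and binding 1 (descriptorCount = 2) gives totalCount = 5;
// descriptorTypes/stageFlags indices 0..2 belong to binding 0 and 3..4 to binding 1,
// with startIndex = 0 and endIndex = 4.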
6639
6640static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6641                                     const char *caller_name) {
6642    bool skipCall = false;
6643    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
6644        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6645                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6646                                                                 "exceeds this device's maxPushConstantsSize of %u.",
6647                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
6648    }
6649    return skipCall;
6650}
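
// Worked example: with maxPushConstantsSize = 128, a range of {offset = 0, size = 64}
// passes (0 + 64 <= 128), while {offset = 96, size = 64} is reported (96 + 64 = 160 > 128).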
6651
6652VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6653                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6654    bool skipCall = false;
6655    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6656    uint32_t i = 0;
6657    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6658        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6659                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
6660        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
6661            skipCall |=
6662                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6663                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
6664                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6665                        i, pCreateInfo->pPushConstantRanges[i].size);
6666        }
6667        // TODO : Add warning if ranges overlap
6668    }
6669    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6670    if (VK_SUCCESS == result) {
6671        loader_platform_thread_lock_mutex(&globalLock);
6672        // TODOSC : Merge capture of the setLayouts per pipeline
6673        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6674        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
6675        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6676            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
6677        }
6678        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
6679        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6680            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
6681        }
6682        loader_platform_thread_unlock_mutex(&globalLock);
6683    }
6684    return result;
6685}
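
// Illustrative sketch (application-side usage, not layer code): a push constant range
// that satisfies both checks above has a non-zero size that is a multiple of 4 and fits
// within maxPushConstantsSize:
//     VkPushConstantRange range = {VK_SHADER_STAGE_VERTEX_BIT, /*offset*/ 0, /*size*/ 16};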
6686
6687VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6688vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6689                       VkDescriptorPool *pDescriptorPool) {
6690    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6691    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6692    if (VK_SUCCESS == result) {
6693        // Track the new pool in the global descriptor pool map
6694        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6695                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
6696                    (uint64_t)*pDescriptorPool))
6697            return VK_ERROR_VALIDATION_FAILED_EXT;
6698        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6699        if (NULL == pNewNode) {
6700            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6701                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6702                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6703                return VK_ERROR_VALIDATION_FAILED_EXT;
6704        } else {
6705            loader_platform_thread_lock_mutex(&globalLock);
6706            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6707            loader_platform_thread_unlock_mutex(&globalLock);
6708        }
6709    } else {
6710        // TODO : Is any clean-up needed if pool creation fails?
6711    }
6712    return result;
6713}
6714
6715VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6716vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6717    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6718    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6719    if (VK_SUCCESS == result) {
6720        loader_platform_thread_lock_mutex(&globalLock);
6721        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6722        loader_platform_thread_unlock_mutex(&globalLock);
6723    }
6724    return result;
6725}
6726
6727VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6728vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6729    VkBool32 skipCall = VK_FALSE;
6730    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6731
6732    loader_platform_thread_lock_mutex(&globalLock);
6733    // Verify that requested descriptorSets are available in pool
6734    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6735    if (!pPoolNode) {
6736        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6737                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
6738                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
6739                            (uint64_t)pAllocateInfo->descriptorPool);
6740    } else { // Make sure pool has all the available descriptors before calling down chain
6741        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
6742                                                             pAllocateInfo->pSetLayouts);
6743    }
6744    loader_platform_thread_unlock_mutex(&globalLock);
6745    if (skipCall)
6746        return VK_ERROR_VALIDATION_FAILED_EXT;
6747    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6748    if (VK_SUCCESS == result) {
6749        loader_platform_thread_lock_mutex(&globalLock);
6750        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6751        if (pPoolNode) {
6752            if (pAllocateInfo->descriptorSetCount == 0) {
6753                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6754                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
6755                        "AllocateDescriptorSets called with 0 count");
6756            }
6757            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
6758                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6759                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
6760                        (uint64_t)pDescriptorSets[i]);
6761                // Create new set node and add to head of pool nodes
6762                SET_NODE *pNewNode = new SET_NODE;
6763                if (NULL == pNewNode) {
6764                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6765                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6766                                DRAWSTATE_OUT_OF_MEMORY, "DS",
6767                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
6768                        loader_platform_thread_unlock_mutex(&globalLock);
6769                        return VK_ERROR_VALIDATION_FAILED_EXT;
6770                    }
6771                } else {
6772                    // TODO : Pool should store a total count of each type of Descriptor available
6773                    //  When descriptors are allocated, decrement the count and validate here
6774                    //  that the count doesn't go below 0. On reset/free, the count needs to be bumped back up.
6775                    // Insert set at head of Set LL for this pool
6776                    pNewNode->pNext = pPoolNode->pSets;
6777                    pNewNode->in_use.store(0);
6778                    pPoolNode->pSets = pNewNode;
6779                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
6780                    if (NULL == pLayout) {
6781                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6782                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
6783                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
6784                                    "Unable to find set layout node for layout %#" PRIxLEAST64
6785                                    " specified in vkAllocateDescriptorSets() call",
6786                                    (uint64_t)pAllocateInfo->pSetLayouts[i])) {
6787                            loader_platform_thread_unlock_mutex(&globalLock);
6788                            return VK_ERROR_VALIDATION_FAILED_EXT;
6789                        }
6790                    }
6791                    pNewNode->pLayout = pLayout;
6792                    pNewNode->pool = pAllocateInfo->descriptorPool;
6793                    pNewNode->set = pDescriptorSets[i];
6794                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
6795                    if (pNewNode->descriptorCount) {
6796                        pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
6797                    }
6798                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
6799                }
6800            }
6801        }
6802        loader_platform_thread_unlock_mutex(&globalLock);
6803    }
6804    return result;
6805}
6806
6807VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6808vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6809    VkBool32 skipCall = VK_FALSE;
6810    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6811    // Make sure that no sets being destroyed are in-flight
6812    loader_platform_thread_lock_mutex(&globalLock);
6813    for (uint32_t i = 0; i < count; ++i)
6814        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
6815    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6816    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6817        // Can't Free from a NON_FREE pool
6818        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6819                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6820                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6821                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6822    }
6823    loader_platform_thread_unlock_mutex(&globalLock);
6824    if (VK_FALSE != skipCall)
6825        return VK_ERROR_VALIDATION_FAILED_EXT;
6826    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6827    if (VK_SUCCESS == result) {
6828        loader_platform_thread_lock_mutex(&globalLock);
6829
6830        // Update available descriptor sets in pool (guard: pool may have no tracking node)
6831        if (pPoolNode) pPoolNode->availableSets += count;
6832
6833        // For each freed descriptor add it back into the pool as available
6834        for (uint32_t i = 0; i < count; ++i) {
6835            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
6836            invalidateBoundCmdBuffers(dev_data, pSet);
6837            LAYOUT_NODE *pLayout = pSet->pLayout;
6838            uint32_t typeIndex = 0, poolSizeCount = 0;
6839            for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
6840                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
6841                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
6842                if (pPoolNode) pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
6843            }
6844        }
6845        loader_platform_thread_unlock_mutex(&globalLock);
6846    }
6847    // TODO : Any other clean-up or book-keeping to do here?
6848    return result;
6849}
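
// Worked example of the accounting above: freeing 2 sets whose layout has a single
// binding of type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER with descriptorCount = 4 adds 2 to
// availableSets and, per freed set, 4 back to the uniform-buffer entry of
// availableDescriptorTypeCount (8 total).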
6850
6851VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6852vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6853                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6854    // dsUpdate will return VK_TRUE only if a bailout error occurs, so we only call down the chain when update returns VK_FALSE
6855    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6856    loader_platform_thread_lock_mutex(&globalLock);
6857#if MTMERGESOURCE
6858    // MTMTODO : Merge this in with existing update code below and handle descriptor copies case
6859    uint32_t j = 0;
6860    for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
6861        if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
6862            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6863                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].images.push_back(
6864                    pDescriptorWrites[i].pImageInfo[j].imageView);
6865            }
6866        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
6867            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6868                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6869                    dev_data->bufferViewMap[pDescriptorWrites[i].pTexelBufferView[j]].buffer);
6870            }
6871        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
6872                   pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6873            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6874                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6875                    pDescriptorWrites[i].pBufferInfo[j].buffer);
6876            }
6877        }
6878    }
6879#endif
6880    VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6881    loader_platform_thread_unlock_mutex(&globalLock);
6882    if (!rtn) {
6883        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6884                                                              pDescriptorCopies);
6885    }
6886}
6887
6888VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6889vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6890    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6891    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6892    if (VK_SUCCESS == result) {
6893        loader_platform_thread_lock_mutex(&globalLock);
6894        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6895        if (cp_it != dev_data->commandPoolMap.end()) {
6896            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6897                // Add command buffer to its commandPool map
6898                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6899                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6900                // Add command buffer to map
6901                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6902                resetCB(dev_data, pCommandBuffer[i]);
6903                pCB->createInfo = *pCreateInfo;
6904                pCB->device = device;
6905            }
6906        }
6907#if MTMERGESOURCE
6908        printCBList(dev_data, device);
6909#endif
6910        loader_platform_thread_unlock_mutex(&globalLock);
6911    }
6912    return result;
6913}
6914
6915VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6916vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6917    VkBool32 skipCall = VK_FALSE;
6918    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6919    loader_platform_thread_lock_mutex(&globalLock);
6920    // Validate command buffer level
6921    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6922    if (pCB) {
6923#if MTMERGESOURCE
6924        bool commandBufferComplete = false;
6925        // MTMTODO : Merge this with code below
6926        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6927        skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
6928
6929        if (!commandBufferComplete) {
6930            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6931                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6932                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
6933                                "You must check the CB completion flag before this call.",
6934                                commandBuffer);
6935        }
6936#endif
6937        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6938            // Secondary Command Buffer
6939            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6940            if (!pInfo) {
6941                skipCall |=
6942                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6943                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6944                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
6945                            reinterpret_cast<void *>(commandBuffer));
6946            } else {
6947                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6948                    if (!pInfo->renderPass) { // renderPass must NOT be null for a Secondary CB
6949                        skipCall |= log_msg(
6950                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6951                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6952                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
6953                            reinterpret_cast<void *>(commandBuffer));
6954                    }
6955                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6956                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6957                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6958                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6959                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
6960                                                  "valid framebuffer parameter is specified.",
6961                                            reinterpret_cast<void *>(commandBuffer));
6962                    } else {
6963                        string errorString = "";
6964                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
6965                        if (fbNode != dev_data->frameBufferMap.end()) {
6966                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
6967                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6968                                // renderPass that framebuffer was created with must be
6969                                // compatible with the renderPass specified in the
6970                                // inheritance info
6971                                skipCall |=
6972                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6973                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6974                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6975                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6976                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
6977                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
6978                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6979                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6980                            }
6981                            // Connect this framebuffer to this cmdBuffer
6982                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
6983                        }
6984                    }
6985                }
6986                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6987                     dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
6988                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6989                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6990                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6991                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6992                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
6993                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is disabled or the device does not "
6994                                        "support precise occlusion queries.",
6995                                        reinterpret_cast<void *>(commandBuffer));
6996                }
6997            }
6998            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6999                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
7000                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
7001                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
7002                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7003                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7004                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7005                                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must have a subpass index (%d) "
7006                                            "that is less than the number of subpasses (%d).",
7007                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
7008                    }
7009                }
7010            }
7011        }
7012        if (CB_RECORDING == pCB->state) {
7013            skipCall |=
7014                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7015                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7016                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
7017                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7018                        (uint64_t)commandBuffer);
7019        } else if (CB_RECORDED == pCB->state) {
7020            VkCommandPool cmdPool = pCB->createInfo.commandPool;
7021            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7022                skipCall |=
7023                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7024                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7025                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
7026                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
7027                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7028                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7029            }
7030            resetCB(dev_data, commandBuffer);
7031        }
7032        // Set updated state here in case implicit reset occurs above
7033        pCB->state = CB_RECORDING;
7034        pCB->beginInfo = *pBeginInfo;
7035        if (pCB->beginInfo.pInheritanceInfo) {
7036            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
7037            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
7038        }
7039    } else {
7040        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7041                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7042                            "vkBeginCommandBuffer(): Unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
7043    }
7044    loader_platform_thread_unlock_mutex(&globalLock);
7045    if (VK_FALSE != skipCall) {
7046        return VK_ERROR_VALIDATION_FAILED_EXT;
7047    }
7048    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
7049#if MTMERGESOURCE
7050    loader_platform_thread_lock_mutex(&globalLock);
7051    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7052    loader_platform_thread_unlock_mutex(&globalLock);
7053#endif
7054    return result;
7055}
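
// Illustrative sketch (application-side usage, not layer code; handle names are
// hypothetical): a secondary command buffer begun with RENDER_PASS_CONTINUE_BIT satisfies
// the checks above by supplying inheritance info with a valid renderPass (a framebuffer
// is optional but recommended for performance):
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inherit.renderPass = renderPass;
//     inherit.subpass = 0;               // must be < the render pass's subpassCount
//     inherit.framebuffer = framebuffer; // must be compatible with renderPass if set
//     VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondaryCB, &begin);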
7056
7057VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
7058    VkBool32 skipCall = VK_FALSE;
7059    VkResult result = VK_SUCCESS;
7060    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7061    loader_platform_thread_lock_mutex(&globalLock);
7062    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7063    if (pCB) {
7064        if (pCB->state != CB_RECORDING) {
7065            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
7066        }
7067        for (auto query : pCB->activeQueries) {
7068            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7069                                DRAWSTATE_INVALID_QUERY, "DS",
7070                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
7071                                (uint64_t)(query.pool), query.index);
7072        }
7073    }
7074    if (VK_FALSE == skipCall) {
7075        loader_platform_thread_unlock_mutex(&globalLock);
7076        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
7077        loader_platform_thread_lock_mutex(&globalLock);
7078        if ((VK_SUCCESS == result) && pCB) { // pCB may be NULL if the CB node lookup failed above
7079            pCB->state = CB_RECORDED;
7080            // Reset CB status flags
7081            pCB->status = 0;
7082            printCB(dev_data, commandBuffer);
7083        }
7084    } else {
7085        result = VK_ERROR_VALIDATION_FAILED_EXT;
7086    }
7087    loader_platform_thread_unlock_mutex(&globalLock);
7088    return result;
7089}
7090
7091VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7092vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7093    VkBool32 skipCall = VK_FALSE;
7094    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7095    loader_platform_thread_lock_mutex(&globalLock);
7096#if MTMERGESOURCE
7097    bool commandBufferComplete = false;
7098    // Verify that CB is complete (not in-flight)
7099    skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7100    if (!commandBufferComplete) {
7101        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7102                            (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7103                            "Resetting CB %p before it has completed. You must check CB "
7104                            "completion flag before calling vkResetCommandBuffer().",
7105                            commandBuffer);
7106    }
7107    // Clear memory references at this point.
7108    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7109#endif
7110    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7111    VkCommandPool cmdPool = pCB ? pCB->createInfo.commandPool : VK_NULL_HANDLE; // guard against an untracked CB
7112    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7113        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7114                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7115                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
7116                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7117                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7118    }
7119    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7120        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7121                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7122                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
7123                            reinterpret_cast<uint64_t>(commandBuffer));
7124    }
7125    loader_platform_thread_unlock_mutex(&globalLock);
7126    if (skipCall != VK_FALSE)
7127        return VK_ERROR_VALIDATION_FAILED_EXT;
7128    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
7129    if (VK_SUCCESS == result) {
7130        loader_platform_thread_lock_mutex(&globalLock);
7131        resetCB(dev_data, commandBuffer);
7132        loader_platform_thread_unlock_mutex(&globalLock);
7133    }
7134    return result;
7135}
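
// Illustrative sketch (application-side usage, not layer code; handle names are
// hypothetical): per the checks above, resetting an individual command buffer requires
// its pool to have been created with the reset flag:
//     VkCommandPoolCreateInfo poolInfo = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO};
//     poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     poolInfo.queueFamilyIndex = queueFamilyIndex;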
7136#if MTMERGESOURCE
7137// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
7138//    need to account for that mem now having binding to given commandBuffer
7139#endif
7140VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7141vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7142    VkBool32 skipCall = VK_FALSE;
7143    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7144    loader_platform_thread_lock_mutex(&globalLock);
7145    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7146    if (pCB) {
7147        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7148        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7149            skipCall |=
7150                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7151                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7152                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
7153                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
7154        }
7155
7156        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7157        if (pPN) {
7158            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
7159            set_cb_pso_status(pCB, pPN);
7160            set_pipeline_state(pPN);
7161            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7162        } else {
7163            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7164                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7165                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7166        }
7167    }
7168    loader_platform_thread_unlock_mutex(&globalLock);
7169    if (VK_FALSE == skipCall)
7170        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7171}
7172
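// The vkCmdSet* dynamic-state entry points below share a pattern: record the command on the
// CB node and set the matching CBSTATUS_* bit, which validate_draw_state() later checks
// against the dynamic state required by the currently bound pipeline.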
7173VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7174vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7175    VkBool32 skipCall = VK_FALSE;
7176    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7177    loader_platform_thread_lock_mutex(&globalLock);
7178    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7179    if (pCB) {
7180        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7181        pCB->status |= CBSTATUS_VIEWPORT_SET;
7182        if (pCB->viewports.size() < (firstViewport + viewportCount)) pCB->viewports.resize(firstViewport + viewportCount);
7183        memcpy(pCB->viewports.data() + firstViewport, pViewports, viewportCount * sizeof(VkViewport));
7184    }
7185    loader_platform_thread_unlock_mutex(&globalLock);
7186    if (VK_FALSE == skipCall)
7187        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7188}
7189
7190VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7191vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7192    VkBool32 skipCall = VK_FALSE;
7193    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7194    loader_platform_thread_lock_mutex(&globalLock);
7195    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7196    if (pCB) {
7197        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7198        pCB->status |= CBSTATUS_SCISSOR_SET;
7199        if (pCB->scissors.size() < (firstScissor + scissorCount)) pCB->scissors.resize(firstScissor + scissorCount);
7200        memcpy(pCB->scissors.data() + firstScissor, pScissors, scissorCount * sizeof(VkRect2D));
7201    }
7202    loader_platform_thread_unlock_mutex(&globalLock);
7203    if (VK_FALSE == skipCall)
7204        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7205}
7206
7207VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7208    VkBool32 skipCall = VK_FALSE;
7209    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7210    loader_platform_thread_lock_mutex(&globalLock);
7211    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7212    if (pCB) {
7213        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7214        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7215    }
7216    loader_platform_thread_unlock_mutex(&globalLock);
7217    if (VK_FALSE == skipCall)
7218        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7219}
7220
7221VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7222vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7223    VkBool32 skipCall = VK_FALSE;
7224    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7225    loader_platform_thread_lock_mutex(&globalLock);
7226    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7227    if (pCB) {
7228        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7229        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7230    }
7231    loader_platform_thread_unlock_mutex(&globalLock);
7232    if (VK_FALSE == skipCall)
7233        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7234                                                         depthBiasSlopeFactor);
7235}
7236
7237VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7238    VkBool32 skipCall = VK_FALSE;
7239    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7240    loader_platform_thread_lock_mutex(&globalLock);
7241    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7242    if (pCB) {
7243        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7244        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7245    }
7246    loader_platform_thread_unlock_mutex(&globalLock);
7247    if (VK_FALSE == skipCall)
7248        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7249}
7250
7251VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7252vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7253    VkBool32 skipCall = VK_FALSE;
7254    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7255    loader_platform_thread_lock_mutex(&globalLock);
7256    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7257    if (pCB) {
7258        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7259        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7260    }
7261    loader_platform_thread_unlock_mutex(&globalLock);
7262    if (VK_FALSE == skipCall)
7263        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7264}
7265
7266VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7267vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7268    VkBool32 skipCall = VK_FALSE;
7269    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7270    loader_platform_thread_lock_mutex(&globalLock);
7271    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7272    if (pCB) {
7273        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7274        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7275    }
7276    loader_platform_thread_unlock_mutex(&globalLock);
7277    if (VK_FALSE == skipCall)
7278        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7279}
7280
7281VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7282vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7283    VkBool32 skipCall = VK_FALSE;
7284    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7285    loader_platform_thread_lock_mutex(&globalLock);
7286    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7287    if (pCB) {
7288        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7289        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7290    }
7291    loader_platform_thread_unlock_mutex(&globalLock);
7292    if (VK_FALSE == skipCall)
7293        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7294}
7295
7296VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7297vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7298    VkBool32 skipCall = VK_FALSE;
7299    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7300    loader_platform_thread_lock_mutex(&globalLock);
7301    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7302    if (pCB) {
7303        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7304        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7305    }
7306    loader_platform_thread_unlock_mutex(&globalLock);
7307    if (VK_FALSE == skipCall)
7308        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7309}
7310
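// vkCmdBindDescriptorSets checks that each set exists and has been updated, that its layout
// is compatible with the overlapping slot of the given pipelineLayout, that every dynamic
// offset respects the device's min*BufferOffsetAlignment limits, and that dynamicOffsetCount
// exactly matches the number of dynamic descriptors in the sets being bound.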
7311VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7312vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7313                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7314                        const uint32_t *pDynamicOffsets) {
7315    VkBool32 skipCall = VK_FALSE;
7316    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7317    loader_platform_thread_lock_mutex(&globalLock);
7318#if MTMERGESOURCE
7319    // MTMTODO : Merge this with code below
7320    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7321    if (cb_data != dev_data->commandBufferMap.end()) {
7322        // MTMTODO : activeDescriptorSets should be merged with lastBound.boundDescriptorSets
7323        std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
7324        if (activeDescriptorSets.size() < (setCount + firstSet)) {
7325            activeDescriptorSets.resize(setCount + firstSet);
7326        }
7327        for (uint32_t i = 0; i < setCount; ++i) {
7328            activeDescriptorSets[i + firstSet] = pDescriptorSets[i];
7329        }
7330    }
7331    // TODO : Somewhere need to verify that all textures referenced by shaders in DS are in some type of *SHADER_READ* state
7332#endif
7333    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7334    if (pCB) {
7335        if (pCB->state == CB_RECORDING) {
7336            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7337            uint32_t totalDynamicDescriptors = 0;
7338            string errorString = "";
7339            uint32_t lastSetIndex = firstSet + setCount - 1;
7340            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
7341                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7342            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7343            for (uint32_t i = 0; i < setCount; i++) {
7344                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7345                if (pSet) {
7346                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
7347                    pSet->boundCmdBuffers.insert(commandBuffer);
7348                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
7349                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7350                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7351                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7352                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7353                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7354                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7355                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7356                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7357                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7358                                            "DS %#" PRIxLEAST64
7359                                            " bound but it was never updated. You may want to either update it or not bind it.",
7360                                            (uint64_t)pDescriptorSets[i]);
7361                    }
7362                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7363                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7364                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7365                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7366                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7367                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
7368                                            "pipelineLayout due to: %s",
7369                                            i, errorString.c_str());
7370                    }
7371                    if (pSet->pLayout->dynamicDescriptorCount) {
7372                        // First make sure we won't overstep bounds of pDynamicOffsets array
7373                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
7374                            skipCall |=
7375                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7376                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7377                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7378                                        "descriptorSet #%u (%#" PRIxLEAST64
7379                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7380                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7381                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
7382                                        (dynamicOffsetCount - totalDynamicDescriptors));
7383                        } else { // Validate and store dynamic offsets with the set
7384                            // Validate Dynamic Offset Minimums
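                            // For example, with minUniformBufferOffsetAlignment = 0x100, a
                            // dynamic offset of 0x180 fails this check while 0x200 passes.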
7385                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7386                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7387                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7388                                    if (vk_safe_modulo(
7389                                            pDynamicOffsets[cur_dyn_offset],
7390                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
7391                                        0) {
7392                                        skipCall |= log_msg(
7393                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7394                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7395                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7396                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7397                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7398                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7399                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
7400                                    }
7401                                    cur_dyn_offset++;
7402                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7403                                    if (vk_safe_modulo(
7404                                            pDynamicOffsets[cur_dyn_offset],
7405                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
7406                                        0) {
7407                                        skipCall |= log_msg(
7408                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7409                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7410                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7411                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7412                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7413                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7414                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
7415                                    }
7416                                    cur_dyn_offset++;
7417                                }
7418                            }
7419                            // Keep running total of dynamic descriptor count to verify at the end
7420                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
7421                        }
7422                    }
7423                } else {
7424                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7425                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7426                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7427                                        (uint64_t)pDescriptorSets[i]);
7428                }
7429            }
7430            skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7431            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7432            if (firstSet > 0) { // Check set #s below the first bound set
7433                for (uint32_t i = 0; i < firstSet; ++i) {
7434                    if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7435                        !verify_set_layout_compatibility(
7436                            dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7437                            errorString)) {
7438                        skipCall |= log_msg(
7439                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7440                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7441                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7442                            "DescriptorSet %#" PRIxLEAST64
7443                            " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7444                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7445                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7446                    }
7447                }
7448            }
7449            // Check if newly last bound set invalidates any remaining bound sets
7450            if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7451                if (oldFinalBoundSet &&
7452                    !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7453                                                     errorString)) {
7454                    skipCall |=
7455                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7456                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7457                                DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64
7458                                                      " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7459                                                      " newly bound as set #%u so set #%u and any subsequent sets were "
7460                                                      "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7461                                (uint64_t)oldFinalBoundSet, lastSetIndex,
7462                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7463                                lastSetIndex + 1, (uint64_t)layout);
7464                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7465                }
7466            }
7480            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7481            if (totalDynamicDescriptors != dynamicOffsetCount) {
7482                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7483                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7484                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7485                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7486                                    "is %u. It should exactly match the number of dynamic descriptors.",
7487                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7488            }
7489            // Save dynamicOffsets bound to this CB
7490            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7491                pCB->lastBound[pipelineBindPoint].dynamicOffsets.push_back(pDynamicOffsets[i]);
7492            }
7493        } else {
7494            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7495        }
7496    }
7497    loader_platform_thread_unlock_mutex(&globalLock);
7498    if (VK_FALSE == skipCall)
7499        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7500                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7501}
7502
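// vkCmdBindIndexBuffer: the bind offset must be aligned to the index size, i.e. a multiple
// of 2 bytes for VK_INDEX_TYPE_UINT16 or 4 bytes for VK_INDEX_TYPE_UINT32.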
7503VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7504vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7505    VkBool32 skipCall = VK_FALSE;
7506    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7507    loader_platform_thread_lock_mutex(&globalLock);
7508#if MTMERGESOURCE
7509    VkDeviceMemory mem;
7510    skipCall =
7511        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7512    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7513    if (cb_data != dev_data->commandBufferMap.end()) {
7514        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7515        cb_data->second->validate_functions.push_back(function);
7516    }
7517    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7518#endif
7519    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7520    if (pCB) {
7521        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7522        VkDeviceSize offset_align = 0;
7523        switch (indexType) {
7524        case VK_INDEX_TYPE_UINT16:
7525            offset_align = 2;
7526            break;
7527        case VK_INDEX_TYPE_UINT32:
7528            offset_align = 4;
7529            break;
7530        default:
7531            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7532            break;
7533        }
7534        if (!offset_align || (offset % offset_align)) {
7535            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7536                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7537                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7538                                offset, string_VkIndexType(indexType));
7539        }
7540        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7541    }
7542    loader_platform_thread_unlock_mutex(&globalLock);
7543    if (VK_FALSE == skipCall)
7544        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7545}
7546
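// Track which vertex buffers are bound at [firstBinding, firstBinding + bindingCount) so a
// subsequent draw can snapshot them (updateResourceTrackingOnDraw) for later validation.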
7547void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7548    uint32_t end = firstBinding + bindingCount;
7549    if (pCB->currentDrawData.buffers.size() < end) {
7550        pCB->currentDrawData.buffers.resize(end);
7551    }
7552    for (uint32_t i = 0; i < bindingCount; ++i) {
7553        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7554    }
7555}
7556
7557void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7558
7559VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7560                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7561                                                                  const VkDeviceSize *pOffsets) {
7562    VkBool32 skipCall = VK_FALSE;
7563    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7564    loader_platform_thread_lock_mutex(&globalLock);
7565#if MTMERGESOURCE
7566    for (uint32_t i = 0; i < bindingCount; ++i) {
7567        VkDeviceMemory mem;
7568        skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
7569                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7570        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7571        if (cb_data != dev_data->commandBufferMap.end()) {
7572            std::function<VkBool32()> function =
7573                [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7574            cb_data->second->validate_functions.push_back(function);
7575        }
7576    }
7577    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7578#endif
7579    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7580    if (pCB) {
7581        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7582        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7583    } else {
7584        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7585    }
7586    loader_platform_thread_unlock_mutex(&globalLock);
7587    if (VK_FALSE == skipCall)
7588        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7589}
7590
7591#if MTMERGESOURCE
7592/* expects globalLock to be held by caller */
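// Queues deferred validate_functions that mark the memory backing every image and buffer
// referenced by the CB's active descriptor sets as valid (written) when the CB executes.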
7593bool markStoreImagesAndBuffersAsWritten(VkCommandBuffer commandBuffer) {
7594    bool skip_call = false;
7595    layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7596    auto cb_data = my_data->commandBufferMap.find(commandBuffer);
7597    if (cb_data == my_data->commandBufferMap.end())
7598        return skip_call;
7599    std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
7600    for (auto descriptorSet : activeDescriptorSets) {
7601        auto ds_data = my_data->descriptorSetMap.find(descriptorSet);
7602        if (ds_data == my_data->descriptorSetMap.end())
7603            continue;
7604        std::vector<VkImageView> images = ds_data->second.images;
7605        std::vector<VkBuffer> buffers = ds_data->second.buffers;
7606        for (auto imageView : images) {
7607            auto iv_data = my_data->imageViewMap.find(imageView);
7608            if (iv_data == my_data->imageViewMap.end())
7609                continue;
7610            VkImage image = iv_data->second.image;
7611            VkDeviceMemory mem;
7612            skip_call |=
7613                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7614            std::function<VkBool32()> function = [=]() {
7615                set_memory_valid(my_data, mem, true, image);
7616                return VK_FALSE;
7617            };
7618            cb_data->second->validate_functions.push_back(function);
7619        }
7620        for (auto buffer : buffers) {
7621            VkDeviceMemory mem;
7622            skip_call |=
7623                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7624            std::function<VkBool32()> function = [=]() {
7625                set_memory_valid(my_data, mem, true);
7626                return VK_FALSE;
7627            };
7628            cb_data->second->validate_functions.push_back(function);
7629        }
7630    }
7631    return skip_call;
7632}
7633#endif
7634
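// The draw commands below share a common validation sequence: record the command, bump the
// per-type draw counter, run validate_draw_state() against the currently bound pipeline and
// descriptor state, snapshot the bound vertex buffers, and use outsideRenderPass() to flag
// draws recorded outside an active render pass.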
7635VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7636                                                     uint32_t firstVertex, uint32_t firstInstance) {
7637    VkBool32 skipCall = VK_FALSE;
7638    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7639    loader_platform_thread_lock_mutex(&globalLock);
7640#if MTMERGESOURCE
7641    // MTMTODO : merge with code below
7642    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7643#endif
7644    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7645    if (pCB) {
7646        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7647        pCB->drawCount[DRAW]++;
7648        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7649        // TODO : Need to pass commandBuffer as srcObj here
7650        skipCall |=
7651            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7652                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7653        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7654        if (VK_FALSE == skipCall) {
7655            updateResourceTrackingOnDraw(pCB);
7656        }
7657        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7658    }
7659    loader_platform_thread_unlock_mutex(&globalLock);
7660    if (VK_FALSE == skipCall)
7661        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7662}
7663
7664VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7665                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7666                                                            uint32_t firstInstance) {
7667    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7668    VkBool32 skipCall = VK_FALSE;
7669    loader_platform_thread_lock_mutex(&globalLock);
7670#if MTMERGESOURCE
7671    // MTMTODO : merge with code below
7672    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7673#endif
7674    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7675    if (pCB) {
7676        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7677        pCB->drawCount[DRAW_INDEXED]++;
7678        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7679        // TODO : Need to pass commandBuffer as srcObj here
7680        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7681                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7682                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7683        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7684        if (VK_FALSE == skipCall) {
7685            updateResourceTrackingOnDraw(pCB);
7686        }
7687        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7688    }
7689    loader_platform_thread_unlock_mutex(&globalLock);
7690    if (VK_FALSE == skipCall)
7691        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7692                                                        firstInstance);
7693}
7694
7695VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7696vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7697    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7698    VkBool32 skipCall = VK_FALSE;
7699    loader_platform_thread_lock_mutex(&globalLock);
7700#if MTMERGESOURCE
7701    VkDeviceMemory mem;
7702    // MTMTODO : merge with code below
7703    skipCall =
7704        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7705    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7706    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7707#endif
7708    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7709    if (pCB) {
7710        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7711        pCB->drawCount[DRAW_INDIRECT]++;
7712        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7713        // TODO : Need to pass commandBuffer as srcObj here
7714        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7715                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7716                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7717        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7718        if (VK_FALSE == skipCall) {
7719            updateResourceTrackingOnDraw(pCB);
7720        }
7721        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7722    }
7723    loader_platform_thread_unlock_mutex(&globalLock);
7724    if (VK_FALSE == skipCall)
7725        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7726}
7727
7728VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7729vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7730    VkBool32 skipCall = VK_FALSE;
7731    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7732    loader_platform_thread_lock_mutex(&globalLock);
7733#if MTMERGESOURCE
7734    VkDeviceMemory mem;
7735    // MTMTODO : merge with code below
7736    skipCall =
7737        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7738    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7739    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7740#endif
7741    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7742    if (pCB) {
7743        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7744        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7745        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7746        // TODO : Need to pass commandBuffer as srcObj here
7747        skipCall |=
7748            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7749                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7750                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7751        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7752        if (VK_FALSE == skipCall) {
7753            updateResourceTrackingOnDraw(pCB);
7754        }
7755        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7756    }
7757    loader_platform_thread_unlock_mutex(&globalLock);
7758    if (VK_FALSE == skipCall)
7759        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7760}
7761
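// Dispatch and transfer commands use insideRenderPass() to flag commands recorded while a
// render pass is active, since these commands are only valid outside a render pass.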
7762VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7763    VkBool32 skipCall = VK_FALSE;
7764    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7765    loader_platform_thread_lock_mutex(&globalLock);
7766#if MTMERGESOURCE
7767    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7768#endif
7769    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7770    if (pCB) {
7771        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7772        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7773    }
7774    loader_platform_thread_unlock_mutex(&globalLock);
7775    if (VK_FALSE == skipCall)
7776        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7777}
7778
7779VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7780vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7781    VkBool32 skipCall = VK_FALSE;
7782    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7783    loader_platform_thread_lock_mutex(&globalLock);
7784#if MTMERGESOURCE
7785    VkDeviceMemory mem;
7786    skipCall =
7787        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7788    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7789    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7790#endif
7791    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7792    if (pCB) {
7793        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7794        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7795    }
7796    loader_platform_thread_unlock_mutex(&globalLock);
7797    if (VK_FALSE == skipCall)
7798        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7799}
7800
7801VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7802                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
7803    VkBool32 skipCall = VK_FALSE;
7804    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7805    loader_platform_thread_lock_mutex(&globalLock);
7806#if MTMERGESOURCE
7807    VkDeviceMemory mem;
7808    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7809    skipCall =
7810        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7811    if (cb_data != dev_data->commandBufferMap.end()) {
7812        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7813        cb_data->second->validate_functions.push_back(function);
7814    }
7815    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7816    skipCall |=
7817        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7818    if (cb_data != dev_data->commandBufferMap.end()) {
7819        std::function<VkBool32()> function = [=]() {
7820            set_memory_valid(dev_data, mem, true);
7821            return VK_FALSE;
7822        };
7823        cb_data->second->validate_functions.push_back(function);
7824    }
7825    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7826    // Validate that SRC & DST buffers have correct usage flags set
7827    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7828                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7829    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7830                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7831#endif
7832    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7833    if (pCB) {
7834        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7835        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7836    }
7837    loader_platform_thread_unlock_mutex(&globalLock);
7838    if (VK_FALSE == skipCall)
7839        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7840}
7841
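// Helper for the transfer commands below: checks each subresource in subLayers against the
// layout this command buffer last recorded for it. On first use the expected layout is
// simply recorded as the assumed current state; otherwise a mismatch is reported, as is any
// source layout other than TRANSFER_SRC_OPTIMAL (GENERAL is allowed with a perf warning).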
7842VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7843                                 VkImageLayout srcImageLayout) {
7844    VkBool32 skip_call = VK_FALSE;
7845
7846    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7847    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7848    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7849        uint32_t layer = i + subLayers.baseArrayLayer;
7850        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7851        IMAGE_CMD_BUF_LAYOUT_NODE node;
7852        if (!FindLayout(pCB, srcImage, sub, node)) {
7853            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7854            continue;
7855        }
7856        if (node.layout != srcImageLayout) {
7857            // TODO: Improve log message in the next pass
7858            skip_call |=
7859                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7860                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7861                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7862                                                                        "when its current layout is actually %s.",
7863        }
7864    }
7865    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7866        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7867            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7868            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7869                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7870                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7871        } else {
7872            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7873                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7874                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7875                                 string_VkImageLayout(srcImageLayout));
7876        }
7877    }
7878    return skip_call;
7879}
7880
7881VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7882                               VkImageLayout destImageLayout) {
7883    VkBool32 skip_call = VK_FALSE;
7884
7885    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7886    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7887    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7888        uint32_t layer = i + subLayers.baseArrayLayer;
7889        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7890        IMAGE_CMD_BUF_LAYOUT_NODE node;
7891        if (!FindLayout(pCB, destImage, sub, node)) {
7892            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7893            continue;
7894        }
7895        if (node.layout != destImageLayout) {
7896            skip_call |=
7897                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7898                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s "
7899                                                                        "when its current layout is actually %s.",
7900                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7901        }
7902    }
7903    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7904        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7905            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7906            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7907                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7908                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7909        } else {
7910            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7911                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7912                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7913                                 string_VkImageLayout(destImageLayout));
7914        }
7915    }
7916    return skip_call;
7917}
7918
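// vkCmdCopyImage: in addition to the shared memory-binding and usage-flag checks, verify
// the expected source and dest image layouts for every region being copied.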
7919VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7920vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7921               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7922    VkBool32 skipCall = VK_FALSE;
7923    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7924    loader_platform_thread_lock_mutex(&globalLock);
7925#if MTMERGESOURCE
7926    VkDeviceMemory mem;
7927    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7928    // Validate that src & dst images have correct usage flags set
7929    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7930    if (cb_data != dev_data->commandBufferMap.end()) {
7931        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
7932        cb_data->second->validate_functions.push_back(function);
7933    }
7934    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7935    skipCall |=
7936        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7937    if (cb_data != dev_data->commandBufferMap.end()) {
7938        std::function<VkBool32()> function = [=]() {
7939            set_memory_valid(dev_data, mem, true, dstImage);
7940            return VK_FALSE;
7941        };
7942        cb_data->second->validate_functions.push_back(function);
7943    }
7944    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7945    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7946                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7947    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7948                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7949#endif
7950    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7951    if (pCB) {
7952        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
7953        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
7954        for (uint32_t i = 0; i < regionCount; ++i) {
7955            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7956            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7957        }
7958    }
7959    loader_platform_thread_unlock_mutex(&globalLock);
7960    if (VK_FALSE == skipCall)
7961        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7962                                                      regionCount, pRegions);
7963}
7964
7965VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7966vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7967               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7968    VkBool32 skipCall = VK_FALSE;
7969    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7970    loader_platform_thread_lock_mutex(&globalLock);
7971#if MTMERGESOURCE
7972    VkDeviceMemory mem;
7973    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7974    // Validate that src & dst images have correct usage flags set
7975    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7976    if (cb_data != dev_data->commandBufferMap.end()) {
7977        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
7978        cb_data->second->validate_functions.push_back(function);
7979    }
7980    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7981    skipCall |=
7982        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7983    if (cb_data != dev_data->commandBufferMap.end()) {
7984        std::function<VkBool32()> function = [=]() {
7985            set_memory_valid(dev_data, mem, true, dstImage);
7986            return VK_FALSE;
7987        };
7988        cb_data->second->validate_functions.push_back(function);
7989    }
7990    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7991    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7992                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7993    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7994                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7995#endif
7996    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7997    if (pCB) {
7998        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
7999        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
8000    }
8001    loader_platform_thread_unlock_mutex(&globalLock);
8002    if (VK_FALSE == skipCall)
8003        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8004                                                      regionCount, pRegions, filter);
8005}
8006
8007VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8008                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
8009                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8010    VkBool32 skipCall = VK_FALSE;
8011    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8012    loader_platform_thread_lock_mutex(&globalLock);
8013#if MTMERGESOURCE
8014    VkDeviceMemory mem;
8015    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8016    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8017    if (cb_data != dev_data->commandBufferMap.end()) {
8018        std::function<VkBool32()> function = [=]() {
8019            set_memory_valid(dev_data, mem, true, dstImage);
8020            return VK_FALSE;
8021        };
8022        cb_data->second->validate_functions.push_back(function);
8023    }
8024    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
8025    skipCall |=
8026        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8027    if (cb_data != dev_data->commandBufferMap.end()) {
8028        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
8029        cb_data->second->validate_functions.push_back(function);
8030    }
8031    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
8032    // Validate that src buff & dst image have correct usage flags set
8033    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8034                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8035    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8036                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8037#endif
8038    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8039    if (pCB) {
8040        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8041        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
8042        for (uint32_t i = 0; i < regionCount; ++i) {
8043            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8044        }
8045    }
8046    loader_platform_thread_unlock_mutex(&globalLock);
8047    if (VK_FALSE == skipCall)
8048        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
8049                                                              pRegions);
8050}
8051
8052VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8053                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8054                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8055    VkBool32 skipCall = VK_FALSE;
8056    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8057    loader_platform_thread_lock_mutex(&globalLock);
8058#if MTMERGESOURCE
8059    VkDeviceMemory mem;
8060    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8061    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8062    if (cb_data != dev_data->commandBufferMap.end()) {
8063        std::function<VkBool32()> function =
8064            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
8065        cb_data->second->validate_functions.push_back(function);
8066    }
8067    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
8068    skipCall |=
8069        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8070    if (cb_data != dev_data->commandBufferMap.end()) {
8071        std::function<VkBool32()> function = [=]() {
8072            set_memory_valid(dev_data, mem, true);
8073            return VK_FALSE;
8074        };
8075        cb_data->second->validate_functions.push_back(function);
8076    }
8077    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
8078    // Validate that dst buff & src image have correct usage flags set
8079    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8080                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8081    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8082                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8083#endif
8084    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8085    if (pCB) {
8086        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8087        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
8088        for (uint32_t i = 0; i < regionCount; ++i) {
8089            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8090        }
8091    }
8092    loader_platform_thread_unlock_mutex(&globalLock);
8093    if (VK_FALSE == skipCall)
8094        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
8095                                                              pRegions);
8096}
8097
8098VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8099                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8100    VkBool32 skipCall = VK_FALSE;
8101    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8102    loader_platform_thread_lock_mutex(&globalLock);
8103#if MTMERGESOURCE
8104    VkDeviceMemory mem;
8105    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8106    skipCall =
8107        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8108    if (cb_data != dev_data->commandBufferMap.end()) {
8109        std::function<VkBool32()> function = [=]() {
8110            set_memory_valid(dev_data, mem, true);
8111            return VK_FALSE;
8112        };
8113        cb_data->second->validate_functions.push_back(function);
8114    }
8115    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
8116    // Validate that dst buff has correct usage flags set
8117    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8118                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8119#endif
8120    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8121    if (pCB) {
8122        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
8124    }
8125    loader_platform_thread_unlock_mutex(&globalLock);
8126    if (VK_FALSE == skipCall)
8127        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8128}
8129
8130VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8131vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8132    VkBool32 skipCall = VK_FALSE;
8133    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8134    loader_platform_thread_lock_mutex(&globalLock);
8135#if MTMERGESOURCE
8136    VkDeviceMemory mem;
8137    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8138    skipCall =
8139        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8140    if (cb_data != dev_data->commandBufferMap.end()) {
8141        std::function<VkBool32()> function = [=]() {
8142            set_memory_valid(dev_data, mem, true);
8143            return VK_FALSE;
8144        };
8145        cb_data->second->validate_functions.push_back(function);
8146    }
8147    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
8148    // Validate that dst buff has correct usage flags set
8149    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8150                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8151#endif
8152    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8153    if (pCB) {
8154        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
8156    }
8157    loader_platform_thread_unlock_mutex(&globalLock);
8158    if (VK_FALSE == skipCall)
8159        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8160}
8161
8162VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8163                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
8164                                                                 const VkClearRect *pRects) {
8165    VkBool32 skipCall = VK_FALSE;
8166    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8167    loader_platform_thread_lock_mutex(&globalLock);
8168    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8169    if (pCB) {
8170        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8171        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8174            // TODO : commandBuffer should be srcObj
            // There are times when an app legitimately needs to use ClearAttachments (generally when reusing a buffer
            // inside of a render pass). Can we make this warning more specific? We'd like to avoid triggering this check
            // when we can tell the use genuinely requires CmdClearAttachments; otherwise this is more of a performance warning.
8179            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8180                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8181                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8182                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8183                                (uint64_t)(commandBuffer));
8184        }
8185        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
8186    }
8187
8188    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
8190        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
8191        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8192
8193        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8194            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8195            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8196                VkBool32 found = VK_FALSE;
8197                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
8198                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
8199                        found = VK_TRUE;
8200                        break;
8201                    }
8202                }
8203                if (VK_FALSE == found) {
8204                    skipCall |= log_msg(
8205                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8206                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8207                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
8208                        attachment->colorAttachment, pCB->activeSubpass);
8209                }
8210            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
                    (pSD->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {
8214
8215                    skipCall |= log_msg(
8216                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8217                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8218                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
8219                        "in active subpass %d",
8220                        attachment->colorAttachment,
8221                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
8222                        pCB->activeSubpass);
8223                }
8224            }
8225        }
8226    }
8227    loader_platform_thread_unlock_mutex(&globalLock);
8228    if (VK_FALSE == skipCall)
8229        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8230}
8231
8232VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8233                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
8234                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8235    VkBool32 skipCall = VK_FALSE;
8236    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8237    loader_platform_thread_lock_mutex(&globalLock);
8238#if MTMERGESOURCE
8239    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8240    VkDeviceMemory mem;
8241    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8242    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8243    if (cb_data != dev_data->commandBufferMap.end()) {
8244        std::function<VkBool32()> function = [=]() {
8245            set_memory_valid(dev_data, mem, true, image);
8246            return VK_FALSE;
8247        };
8248        cb_data->second->validate_functions.push_back(function);
8249    }
8250    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
8251#endif
8252    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8253    if (pCB) {
8254        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8255        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
8256    }
8257    loader_platform_thread_unlock_mutex(&globalLock);
8258    if (VK_FALSE == skipCall)
8259        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8260}
8261
8262VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8263vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8264                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8265                            const VkImageSubresourceRange *pRanges) {
8266    VkBool32 skipCall = VK_FALSE;
8267    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8268    loader_platform_thread_lock_mutex(&globalLock);
8269#if MTMERGESOURCE
8270    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8271    VkDeviceMemory mem;
8272    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8273    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8274    if (cb_data != dev_data->commandBufferMap.end()) {
8275        std::function<VkBool32()> function = [=]() {
8276            set_memory_valid(dev_data, mem, true, image);
8277            return VK_FALSE;
8278        };
8279        cb_data->second->validate_functions.push_back(function);
8280    }
8281    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
8282#endif
8283    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8284    if (pCB) {
8285        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8286        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
8287    }
8288    loader_platform_thread_unlock_mutex(&globalLock);
8289    if (VK_FALSE == skipCall)
8290        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
8291                                                                   pRanges);
8292}
8293
8294VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8295vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8296                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8297    VkBool32 skipCall = VK_FALSE;
8298    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8299    loader_platform_thread_lock_mutex(&globalLock);
8300#if MTMERGESOURCE
8301    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8302    VkDeviceMemory mem;
8303    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8304    if (cb_data != dev_data->commandBufferMap.end()) {
8305        std::function<VkBool32()> function =
8306            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
8307        cb_data->second->validate_functions.push_back(function);
8308    }
8309    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8310    skipCall |=
8311        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8312    if (cb_data != dev_data->commandBufferMap.end()) {
8313        std::function<VkBool32()> function = [=]() {
8314            set_memory_valid(dev_data, mem, true, dstImage);
8315            return VK_FALSE;
8316        };
8317        cb_data->second->validate_functions.push_back(function);
8318    }
8319    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8320#endif
8321    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8322    if (pCB) {
8323        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8324        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
8325    }
8326    loader_platform_thread_unlock_mutex(&globalLock);
8327    if (VK_FALSE == skipCall)
8328        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8329                                                         regionCount, pRegions);
8330}
8331
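// Record the stage mask an event will be signaled with, both on the command buffer node and,
// once the submitting queue is known, on that queue's event map. Entries queued on
// pCB->eventUpdates are std::function<bool(VkQueue)> callbacks; a minimal sketch of how a
// submit path might replay them (assuming a local 'skip' accumulator at the call site):
//
//     for (auto &update : pCB->eventUpdates)
//         skip |= update(queue);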
8332bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8333    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8334    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8335    if (pCB) {
8336        pCB->eventToStageMap[event] = stageMask;
8337    }
8338    auto queue_data = dev_data->queueMap.find(queue);
8339    if (queue_data != dev_data->queueMap.end()) {
8340        queue_data->second.eventToStageMap[event] = stageMask;
8341    }
8342    return false;
8343}
8344
8345VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8346vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8347    VkBool32 skipCall = VK_FALSE;
8348    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8349    loader_platform_thread_lock_mutex(&globalLock);
8350    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8351    if (pCB) {
8352        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8353        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
8354        pCB->events.push_back(event);
8355        std::function<bool(VkQueue)> eventUpdate =
8356            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8357        pCB->eventUpdates.push_back(eventUpdate);
8358    }
8359    loader_platform_thread_unlock_mutex(&globalLock);
8360    if (VK_FALSE == skipCall)
8361        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
8362}
8363
8364VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8365vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8366    VkBool32 skipCall = VK_FALSE;
8367    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8368    loader_platform_thread_lock_mutex(&globalLock);
8369    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8370    if (pCB) {
8371        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8372        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
8373        pCB->events.push_back(event);
8374        std::function<bool(VkQueue)> eventUpdate =
8375            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8376        pCB->eventUpdates.push_back(eventUpdate);
8377    }
8378    loader_platform_thread_unlock_mutex(&globalLock);
8379    if (VK_FALSE == skipCall)
8380        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
8381}
8382
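// Fold the layout transitions described by an array of image memory barriers into the
// per-command-buffer layout tracking map. The first time a subresource is seen, record
// (oldLayout, newLayout); on later sightings the tracked layout must match the barrier's
// oldLayout (VK_IMAGE_LAYOUT_UNDEFINED acts as a wildcard).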
8383VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
8384    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8385    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8386    VkBool32 skip = VK_FALSE;
8387    uint32_t levelCount = 0;
8388    uint32_t layerCount = 0;
8389
8390    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8391        auto mem_barrier = &pImgMemBarriers[i];
8392        if (!mem_barrier)
8393            continue;
8394        // TODO: Do not iterate over every possibility - consolidate where
8395        // possible
8396        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8397
8398        for (uint32_t j = 0; j < levelCount; j++) {
8399            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8400            for (uint32_t k = 0; k < layerCount; k++) {
8401                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8402                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8403                IMAGE_CMD_BUF_LAYOUT_NODE node;
8404                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8405                    SetLayout(pCB, mem_barrier->image, sub,
8406                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8407                    continue;
8408                }
8409                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8410                    // TODO: Set memory invalid which is in mem_tracker currently
8411                } else if (node.layout != mem_barrier->oldLayout) {
8412                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8413                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8414                                                                                    "when current layout is %s.",
8415                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8416                }
8417                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8418            }
8419        }
8420    }
8421    return skip;
8422}
8423
8424// Print readable FlagBits in FlagMask
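// e.g. (VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT) yields
//      "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]", and 0 yields "[None]"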
8425std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8426    std::string result;
8427    std::string separator;
8428
8429    if (accessMask == 0) {
8430        result = "[None]";
8431    } else {
8432        result = "[";
        for (uint32_t i = 0; i < 32; i++) {
            if (accessMask & (1u << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8436                separator = " | ";
8437            }
8438        }
8439        result = result + "]";
8440    }
8441    return result;
8442}
8443
8444// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8445// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8446// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
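// e.g. for VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL the caller passes
// required_bit = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT and
// optional_bits = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT (see ValidateMaskBitsFromLayouts below).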
8447VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8448                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
8449    VkBool32 skip_call = VK_FALSE;
8450
8451    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
8453            // TODO: Verify against Valid Use
8454            skip_call |=
8455                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8456                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8457                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8458        }
8459    } else {
8460        if (!required_bit) {
8461            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8462                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8463                                                                  "%s when layout is %s, unless the app has previously added a "
8464                                                                  "barrier for this transition.",
8465                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8466                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8467        } else {
8468            std::string opt_bits;
8469            if (optional_bits != 0) {
8470                std::stringstream ss;
8471                ss << optional_bits;
8472                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8473            }
8474            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8475                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8476                                                                  "layout is %s, unless the app has previously added a barrier for "
8477                                                                  "this transition.",
8478                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8479                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8480        }
8481    }
8482    return skip_call;
8483}
8484
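// Dispatch on an image layout to the access bits its barriers are expected to carry.
// As an illustrative application-side sketch (not layer code), a barrier that satisfies the
// TRANSFER_DST_OPTIMAL case below would be built roughly like this:
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType         = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.oldLayout     = VK_IMAGE_LAYOUT_UNDEFINED;
//     barrier.newLayout     = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.srcAccessMask = 0;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; // the required bit for this layout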
8485VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8486                                     const VkImageLayout &layout, const char *type) {
8487    VkBool32 skip_call = VK_FALSE;
8488    switch (layout) {
8489    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8490        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8491                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8492        break;
8493    }
8494    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8495        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8496                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8497        break;
8498    }
8499    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8500        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8501        break;
8502    }
8503    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8504        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8505        break;
8506    }
8507    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8508        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8509                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8510        break;
8511    }
8512    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8513        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8514                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8515        break;
8516    }
8517    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8518        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8519        break;
8520    }
8521    case VK_IMAGE_LAYOUT_UNDEFINED: {
8522        if (accessMask != 0) {
8523            // TODO: Verify against Valid Use section spec
8524            skip_call |=
8525                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8526                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8527                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8528        }
8529        break;
8530    }
8531    case VK_IMAGE_LAYOUT_GENERAL:
8532    default: { break; }
8533    }
8534    return skip_call;
8535}
8536
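// Validate the barriers recorded into 'cmdBuffer': any barrier inside a render pass requires a
// subpass self-dependency; image barriers must use queue family indices consistent with the
// image's sharing mode and keep their subresource range within the image's layer/level counts;
// buffer barriers must not be used inside a render pass and must stay within the buffer's size.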
8537VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8538                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8539                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8540                          const VkImageMemoryBarrier *pImageMemBarriers) {
8541    VkBool32 skip_call = VK_FALSE;
8542    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8543    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB && pCB->activeRenderPass && memBarrierCount) {
8545        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
8546            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8547                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8548                                                                  "with no self dependency specified.",
8549                                 funcName, pCB->activeSubpass);
8550        }
8551    }
8552    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8553        auto mem_barrier = &pImageMemBarriers[i];
8554        auto image_data = dev_data->imageMap.find(mem_barrier->image);
8555        if (image_data != dev_data->imageMap.end()) {
8556            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8557            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8558            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8559                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8560                // be VK_QUEUE_FAMILY_IGNORED
8561                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8562                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8563                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8564                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8565                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
8566                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8567                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8568                }
8569            } else {
8570                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8571                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8572                // or both be a valid queue family
8573                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8574                    (src_q_f_index != dst_q_f_index)) {
8575                    skip_call |=
8576                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8577                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8578                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8579                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8580                                                                     "must be.",
8581                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8582                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8583                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
8584                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
8585                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8586                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8587                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8588                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8589                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8590                                         "queueFamilies crated for this device.",
8591                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8592                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
8593                }
8594            }
8595        }
8596
8597        if (mem_barrier) {
8598            skip_call |=
8599                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8600            skip_call |=
8601                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8602            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
8607            }
8608            auto image_data = dev_data->imageMap.find(mem_barrier->image);
8609            VkFormat format;
8610            uint32_t arrayLayers, mipLevels;
8611            bool imageFound = false;
8612            if (image_data != dev_data->imageMap.end()) {
8613                format = image_data->second.createInfo.format;
8614                arrayLayers = image_data->second.createInfo.arrayLayers;
8615                mipLevels = image_data->second.createInfo.mipLevels;
8616                imageFound = true;
8617            } else if (dev_data->device_extensions.wsi_enabled) {
8618                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
8619                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
8620                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
8621                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
8622                        format = swapchain_data->second->createInfo.imageFormat;
8623                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
8624                        mipLevels = 1;
8625                        imageFound = true;
8626                    }
8627                }
8628            }
8629            if (imageFound) {
8630                if (vk_format_is_depth_and_stencil(format) &&
8631                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
8632                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Image is a depth and stencil format and thus must have both "
                                         "VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                         funcName);
8638                }
8639                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8640                                     ? 1
8641                                     : mem_barrier->subresourceRange.layerCount;
8642                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource must have the sum of the baseArrayLayer (%d) and layerCount (%d) be "
                                         "less than or equal to the total number of layers (%d).",
                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
8649                }
8650                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8651                                     ? 1
8652                                     : mem_barrier->subresourceRange.levelCount;
8653                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource must have the sum of the baseMipLevel (%d) and levelCount (%d) be "
                                         "less than or equal to the total number of levels (%d).",
                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
                                         mem_barrier->subresourceRange.levelCount, mipLevels);
8660                }
8661            }
8662        }
8663    }
8664    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8665        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB && pCB->activeRenderPass) {
8667            skip_call |=
8668                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8669                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8670        }
8671        if (!mem_barrier)
8672            continue;
8673
8674        // Validate buffer barrier queue family indices
8675        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8676             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
8677            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8678             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
8679            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8680                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8681                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8682                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8683                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8684                                 dev_data->physDevProperties.queue_family_properties.size());
8685        }
8686
        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
        if (buffer_data != dev_data->bufferMap.end()) {
            // Only compute the size once we know the buffer is actually tracked
            uint64_t buffer_size = buffer_data->second.create_info ? buffer_data->second.create_info->size : 0;
            if (mem_barrier->offset >= buffer_size) {
8692                skip_call |=
8693                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8694                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
8695                                                             " whose sum is not less than total size %" PRIu64 ".",
8696                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8697                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
8698            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8699                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8700                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8701                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
8702                                     " whose sum is greater than total size %" PRIu64 ".",
8703                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8704                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
8705                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
8706            }
8707        }
8708    }
8709    return skip_call;
8710}
8711
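// Submit-time check that the srcStageMask recorded with vkCmdWaitEvents equals the bitwise OR
// of the stage masks the awaited events were actually set with (per-queue state is consulted
// first, then the device-wide eventMap for events signaled from the host).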
8712bool validateEventStageMask(VkQueue queue, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask) {
8713    bool skip_call = false;
8714    VkPipelineStageFlags stageMask = 0;
8715    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8716    for (uint32_t i = 0; i < eventCount; ++i) {
8717        auto queue_data = dev_data->queueMap.find(queue);
8718        if (queue_data == dev_data->queueMap.end())
8719            return false;
8720        auto event_data = queue_data->second.eventToStageMap.find(pEvents[i]);
8721        if (event_data != queue_data->second.eventToStageMap.end()) {
8722            stageMask |= event_data->second;
8723        } else {
8724            auto global_event_data = dev_data->eventMap.find(pEvents[i]);
8725            if (global_event_data == dev_data->eventMap.end()) {
8726                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8727                                     reinterpret_cast<const uint64_t &>(pEvents[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
8728                                     "Fence 0x%" PRIx64 " cannot be waited on if it has never been set.",
8729                                     reinterpret_cast<const uint64_t &>(pEvents[i]));
8730            } else {
8731                stageMask |= global_event_data->second.stageMask;
8732            }
8733        }
8734    }
8735    if (sourceStageMask != stageMask) {
8736        skip_call |=
8737            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8738                    DRAWSTATE_INVALID_FENCE, "DS",
8739                    "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
8740                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
8741                    sourceStageMask);
8742    }
8743    return skip_call;
8744}
8745
8746VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8747vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8748                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8749                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8750                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8751    VkBool32 skipCall = VK_FALSE;
8752    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8753    loader_platform_thread_lock_mutex(&globalLock);
8754    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8755    if (pCB) {
8756        for (uint32_t i = 0; i < eventCount; ++i) {
8757            pCB->waitedEvents.push_back(pEvents[i]);
8758            pCB->events.push_back(pEvents[i]);
8759        }
8760        std::function<bool(VkQueue)> eventUpdate =
8761            std::bind(validateEventStageMask, std::placeholders::_1, eventCount, pEvents, sourceStageMask);
8762        pCB->eventUpdates.push_back(eventUpdate);
8763        if (pCB->state == CB_RECORDING) {
8764            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8765        } else {
8766            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8767        }
8768        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8769        skipCall |=
8770            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8771                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8772    }
8773    loader_platform_thread_unlock_mutex(&globalLock);
8774    if (VK_FALSE == skipCall)
8775        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8776                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8777                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8778}
8779
8780VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8781vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8782                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8783                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8784                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8785    VkBool32 skipCall = VK_FALSE;
8786    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8787    loader_platform_thread_lock_mutex(&globalLock);
8788    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8789    if (pCB) {
8790        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8791        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8792        skipCall |=
8793            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8794                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8795    }
8796    loader_platform_thread_unlock_mutex(&globalLock);
8797    if (VK_FALSE == skipCall)
8798        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8799                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8800                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8801}
8802
8803VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8804vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8805    VkBool32 skipCall = VK_FALSE;
8806    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8807    loader_platform_thread_lock_mutex(&globalLock);
8808    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8809    if (pCB) {
8810        QueryObject query = {queryPool, slot};
8811        pCB->activeQueries.insert(query);
8812        if (!pCB->startedQueries.count(query)) {
8813            pCB->startedQueries.insert(query);
8814        }
8815        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8816    }
8817    loader_platform_thread_unlock_mutex(&globalLock);
8818    if (VK_FALSE == skipCall)
8819        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8820}
8821
8822VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8823    VkBool32 skipCall = VK_FALSE;
8824    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8825    loader_platform_thread_lock_mutex(&globalLock);
8826    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8827    if (pCB) {
8828        QueryObject query = {queryPool, slot};
8829        if (!pCB->activeQueries.count(query)) {
8830            skipCall |=
8831                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8832                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
8833                        (uint64_t)(queryPool), slot);
8834        } else {
8835            pCB->activeQueries.erase(query);
8836        }
8837        pCB->queryToStateMap[query] = 1;
8838        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8840        } else {
8841            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8842        }
8843    }
8844    loader_platform_thread_unlock_mutex(&globalLock);
8845    if (VK_FALSE == skipCall)
8846        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8847}
8848
8849VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8850vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8851    VkBool32 skipCall = VK_FALSE;
8852    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8853    loader_platform_thread_lock_mutex(&globalLock);
8854    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8855    if (pCB) {
8856        for (uint32_t i = 0; i < queryCount; i++) {
8857            QueryObject query = {queryPool, firstQuery + i};
8858            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8859            pCB->queryToStateMap[query] = 0;
8860        }
8861        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8863        } else {
8864            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8865        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8867    }
8868    loader_platform_thread_unlock_mutex(&globalLock);
8869    if (VK_FALSE == skipCall)
8870        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8871}
8872
8873VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8874vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8875                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8876    VkBool32 skipCall = VK_FALSE;
8877    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8878    loader_platform_thread_lock_mutex(&globalLock);
8879    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8880#if MTMERGESOURCE
8881    VkDeviceMemory mem;
8882    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8883    skipCall |=
8884        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8885    if (cb_data != dev_data->commandBufferMap.end()) {
8886        std::function<VkBool32()> function = [=]() {
8887            set_memory_valid(dev_data, mem, true);
8888            return VK_FALSE;
8889        };
8890        cb_data->second->validate_functions.push_back(function);
8891    }
8892    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8893    // Validate that DST buffer has correct usage flags set
8894    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8895                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8896#endif
8897    if (pCB) {
8898        for (uint32_t i = 0; i < queryCount; i++) {
8899            QueryObject query = {queryPool, firstQuery + i};
8900            if (!pCB->queryToStateMap[query]) {
8901                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8902                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8903                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
8904                                    (uint64_t)(queryPool), firstQuery + i);
8905            }
8906        }
8907        if (pCB->state == CB_RECORDING) {
8908            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8909        } else {
8910            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8911        }
8912        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8913    }
8914    loader_platform_thread_unlock_mutex(&globalLock);
8915    if (VK_FALSE == skipCall)
8916        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8917                                                                 dstOffset, stride, flags);
8918}
8919
8920VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8921                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8922                                                              const void *pValues) {
8923    bool skipCall = false;
8924    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8925    loader_platform_thread_lock_mutex(&globalLock);
8926    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8927    if (pCB) {
8928        if (pCB->state == CB_RECORDING) {
8929            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8930        } else {
8931            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8932        }
8933    }
8934    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
8935        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
8936    }
8937    // TODO : Add warning if push constant update doesn't align with range
8938    loader_platform_thread_unlock_mutex(&globalLock);
8939    if (!skipCall)
8940        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8941}
8942
8943VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8944vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8945    VkBool32 skipCall = VK_FALSE;
8946    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8947    loader_platform_thread_lock_mutex(&globalLock);
8948    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8949    if (pCB) {
8950        QueryObject query = {queryPool, slot};
8951        pCB->queryToStateMap[query] = 1;
8952        if (pCB->state == CB_RECORDING) {
8953            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8954        } else {
8955            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8956        }
8957    }
8958    loader_platform_thread_unlock_mutex(&globalLock);
8959    if (VK_FALSE == skipCall)
8960        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8961}
8962
8963VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8964                                                                   const VkAllocationCallbacks *pAllocator,
8965                                                                   VkFramebuffer *pFramebuffer) {
8966    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8967    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8968    if (VK_SUCCESS == result) {
8969        // Shadow create info and store in map
8970        VkFramebufferCreateInfo *localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
8971        if (pCreateInfo->pAttachments) {
8972            localFBCI->pAttachments = new VkImageView[localFBCI->attachmentCount];
8973            memcpy((void *)localFBCI->pAttachments, pCreateInfo->pAttachments, localFBCI->attachmentCount * sizeof(VkImageView));
8974        }
8975        FRAMEBUFFER_NODE fbNode = {};
8976        fbNode.createInfo = *localFBCI;
8977        std::pair<VkFramebuffer, FRAMEBUFFER_NODE> fbPair(*pFramebuffer, fbNode);
8978        loader_platform_thread_lock_mutex(&globalLock);
8979        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8980            VkImageView view = pCreateInfo->pAttachments[i];
8981            auto view_data = dev_data->imageViewMap.find(view);
8982            if (view_data == dev_data->imageViewMap.end()) {
8983                continue;
8984            }
8985            MT_FB_ATTACHMENT_INFO fb_info;
8986            get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8987                                        &fb_info.mem);
8988            fb_info.image = view_data->second.image;
8989            fbPair.second.attachments.push_back(fb_info);
8990        }
8991        dev_data->frameBufferMap.insert(fbPair);
8992        loader_platform_thread_unlock_mutex(&globalLock);
8993    }
8994    return result;
8995}
8996
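// Depth-first search from 'index' back through each node's 'prev' edges, looking for a path
// to 'dependent'; processed_nodes guards against revisiting a subpass. Returns VK_TRUE if a
// dependency path exists and VK_FALSE otherwise.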
8997VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8998                        std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already visited this node, no new dependency path can be found through it, so return false.
9000    if (processed_nodes.count(index))
9001        return VK_FALSE;
9002    processed_nodes.insert(index);
9003    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
9005    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9006        for (auto elem : node.prev) {
9007            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9008                return VK_TRUE;
9009        }
9010    } else {
9011        return VK_TRUE;
9012    }
9013    return VK_FALSE;
9014}
9015
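// Verify that 'subpass' has a dependency path to every subpass in dependent_subpasses. An
// implicit-only dependency logs an error but leaves the result VK_TRUE; when no path exists
// at all, an error is logged and VK_FALSE is returned.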
9016VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9017                               const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
9018    VkBool32 result = VK_TRUE;
9019    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9020    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9021        if (subpass == dependent_subpasses[k])
9022            continue;
9023        const DAGNode &node = subpass_to_node[subpass];
9024        // Check for a specified dependency between the two nodes. If one exists we are done.
9025        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9026        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9027        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If so, warn; if not, flag an error.
9029            std::unordered_set<uint32_t> processed_nodes;
9030            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9031                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
9032                // TODO: Verify against Valid Use section of spec
9033                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9034                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9035                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
9036                                     subpass, dependent_subpasses[k]);
9037            } else {
9038                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9039                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9040                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9041                                     dependent_subpasses[k]);
9042                result = VK_FALSE;
9043            }
9044        }
9045    }
9046    return result;
9047}
9048
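// Recursively determine whether 'attachment' is written by the subpass at 'index' or by any
// of its predecessors in the DAG. When a predecessor wrote it (depth > 0) and this subpass
// does not list it in pPreserveAttachments, an error is logged. Returns VK_TRUE if the
// attachment is written along this path.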
9049VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9050                        const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
9051    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, since subsequent nodes need to preserve it.
9053    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9054    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9055        if (attachment == subpass.pColorAttachments[j].attachment)
9056            return VK_TRUE;
9057    }
9058    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9059        if (attachment == subpass.pDepthStencilAttachment->attachment)
9060            return VK_TRUE;
9061    }
9062    VkBool32 result = VK_FALSE;
9063    // Loop through previous nodes and see if any of them write to the attachment.
9064    for (auto elem : node.prev) {
9065        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9066    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
9068    if (result && depth > 0) {
9069        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9070        VkBool32 has_preserved = VK_FALSE;
9071        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9072            if (subpass.pPreserveAttachments[j] == attachment) {
9073                has_preserved = VK_TRUE;
9074                break;
9075            }
9076        }
9077        if (has_preserved == VK_FALSE) {
9078            skip_call |=
9079                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9080                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9081                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9082        }
9083    }
9084    return result;
9085}
9086
9087template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Two half-open ranges [offset, offset + size) overlap iff each one begins before the other ends,
    // e.g. [1, 4) and [3, 6) overlap while [1, 3) and [3, 6) do not
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
9090}
9091
9092bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9093    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9094            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9095}
9096
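// Validate that the declared subpass dependencies cover every pair of subpasses that touch
// the same (or an aliasing) attachment: first collect attachments that alias one another,
// then map each attachment to the subpasses that read and write it, and finally confirm a
// dependency path exists between each such pair.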
9097VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
9098                              const std::vector<DAGNode> &subpass_to_node) {
9099    VkBool32 skip_call = VK_FALSE;
9100    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9101    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
9102    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9103    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9104    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9105    // Find overlapping attachments
9106    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9107        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9108            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9109            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9110            if (viewi == viewj) {
9111                overlapping_attachments[i].push_back(j);
9112                overlapping_attachments[j].push_back(i);
9113                continue;
9114            }
9115            auto view_data_i = my_data->imageViewMap.find(viewi);
9116            auto view_data_j = my_data->imageViewMap.find(viewj);
9117            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
9118                continue;
9119            }
9120            if (view_data_i->second.image == view_data_j->second.image &&
9121                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
9122                overlapping_attachments[i].push_back(j);
9123                overlapping_attachments[j].push_back(i);
9124                continue;
9125            }
9126            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
9127            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
9128            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
9129                continue;
9130            }
9131            if (image_data_i->second.mem == image_data_j->second.mem &&
9132                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
9133                                   image_data_j->second.memSize)) {
9134                overlapping_attachments[i].push_back(j);
9135                overlapping_attachments[j].push_back(i);
9136            }
9137        }
9138    }
9139    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9140        uint32_t attachment = i;
9141        for (auto other_attachment : overlapping_attachments[i]) {
9142            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9143                skip_call |=
9144                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9145                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9146                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9147                            attachment, other_attachment);
9148            }
9149            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9150                skip_call |=
9151                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9152                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9153                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9154                            other_attachment, attachment);
9155            }
9156        }
9157    }
    // For each attachment, find the subpasses that use it.
9159    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9160        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            // Skip unused references: VK_ATTACHMENT_UNUSED would index past the end of the vectors
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            input_attachment_to_subpass[attachment].push_back(i);
9164            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9165                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9166            }
9167        }
9168        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
9171            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9172                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9173            }
9174        }
9175        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9176            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9177            output_attachment_to_subpass[attachment].push_back(i);
9178            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9179                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9180            }
9181        }
9182    }
    // Wherever a dependency is needed, make sure one exists
9184    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9185        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input, then every subpass that writes to it must have a dependency relationship
9187        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9190        }
        // If the attachment is an output, then every subpass that uses the attachment must have a dependency relationship
9192        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9196        }
9197        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9198            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9199            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9200            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9201        }
9202    }
    // Loop through implicit dependencies: if a subpass reads an attachment, make sure it is preserved
    // in every subpass between the one that wrote it and the one that reads it.
9205    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9206        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9207        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9208            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9209        }
9210    }
9211    return skip_call;
9212}
9213
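// Check that every attachment reference in each subpass uses a layout valid for its role:
// input attachments should be READ_ONLY_OPTIMAL, color attachments COLOR_ATTACHMENT_OPTIMAL,
// and depth/stencil DEPTH_STENCIL_ATTACHMENT_OPTIMAL. GENERAL is accepted with a performance
// warning; anything else is an error.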
9214VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9215    VkBool32 skip = VK_FALSE;
9216
9217    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9218        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9219        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9220            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
9221                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9222                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9223                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9224                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9225                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9226                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9227                } else {
9228                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9229                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9230                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9231                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
9232                }
9233            }
9234        }
9235        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9236            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
9237                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9238                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9239                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9240                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9241                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9242                } else {
9243                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9244                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9245                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9246                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
9247                }
9248            }
9249        }
9250        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9251            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
9252                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
9253                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9254                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9255                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9256                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
9257                } else {
9258                    skip |=
9259                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9260                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9261                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
9262                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9263                }
9264            }
9265        }
9266    }
9267    return skip;
9268}
9269
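// Build a DAG over the render pass's subpasses: each VkSubpassDependency contributes a prev
// edge on its dstSubpass and a next edge on its srcSubpass (VK_SUBPASS_EXTERNAL endpoints add
// no edge), e.g. a dependency {srcSubpass = 0, dstSubpass = 1} yields subpass_to_node[1].prev
// = {0} and subpass_to_node[0].next = {1}. Self-dependencies are recorded in
// has_self_dependency, and backward or external-to-external dependencies are flagged.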
9270VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9271                       std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9272    VkBool32 skip_call = VK_FALSE;
9273    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9274        DAGNode &subpass_node = subpass_to_node[i];
9275        subpass_node.pass = i;
9276    }
9277    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9278        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9279        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9280            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9281            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9282                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9284        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9285            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9286                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9287        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9288            has_self_dependency[dependency.srcSubpass] = true;
9289        }
9290        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9291            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9292        }
9293        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9294            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9295        }
9296    }
9297    return skip_call;
9298}
9299
9301VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9302                                                                    const VkAllocationCallbacks *pAllocator,
9303                                                                    VkShaderModule *pShaderModule) {
9304    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9305    VkBool32 skip_call = VK_FALSE;
9306    if (!shader_is_spirv(pCreateInfo)) {
9307        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9308                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
9309    }
9310
9311    if (VK_FALSE != skip_call)
9312        return VK_ERROR_VALIDATION_FAILED_EXT;
9313
9314    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9315
9316    if (res == VK_SUCCESS) {
9317        loader_platform_thread_lock_mutex(&globalLock);
9318        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9319        loader_platform_thread_unlock_mutex(&globalLock);
9320    }
9321    return res;
9322}
9323
9324VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9325                                                                  const VkAllocationCallbacks *pAllocator,
9326                                                                  VkRenderPass *pRenderPass) {
9327    VkBool32 skip_call = VK_FALSE;
9328    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9329    loader_platform_thread_lock_mutex(&globalLock);
9330    // Create DAG
9331    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9332    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9333    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9334    // Validate
9335    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9336    if (VK_FALSE != skip_call) {
9337        loader_platform_thread_unlock_mutex(&globalLock);
9338        return VK_ERROR_VALIDATION_FAILED_EXT;
9339    }
9340    loader_platform_thread_unlock_mutex(&globalLock);
9341    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9342    if (VK_SUCCESS == result) {
9343        loader_platform_thread_lock_mutex(&globalLock);
9344        // TODOSC : Merge in tracking of renderpass from shader_checker
9345        // Shadow create info and store in map
9346        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9347        if (pCreateInfo->pAttachments) {
9348            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9349            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9350                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9351        }
9352        if (pCreateInfo->pSubpasses) {
9353            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9354            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9355
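            // Deep-copy each subpass's attachment references into one VkAttachmentReference
            // block per subpass (input, color, resolve, depth/stencil, then preserve) so
            // deleteRenderPasses() can free the block through its first non-null pointer.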
9356            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9357                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9358                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9359                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9360                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9361                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9362
9363                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9364                subpass->pInputAttachments = attachments;
9365                attachments += subpass->inputAttachmentCount;
9366
9367                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9368                subpass->pColorAttachments = attachments;
9369                attachments += subpass->colorAttachmentCount;
9370
9371                if (subpass->pResolveAttachments) {
9372                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9373                    subpass->pResolveAttachments = attachments;
9374                    attachments += subpass->colorAttachmentCount;
9375                }
9376
9377                if (subpass->pDepthStencilAttachment) {
9378                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9379                    subpass->pDepthStencilAttachment = attachments;
9380                    attachments += 1;
9381                }
9382
                // pPreserveAttachments is an array of uint32_t indices, not VkAttachmentReference structs,
                // so copy sizeof(uint32_t) per element to avoid reading past the end of the source array
                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
                subpass->pPreserveAttachments = &attachments->attachment;
9385            }
9386        }
9387        if (pCreateInfo->pDependencies) {
9388            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9389            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9390                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9391        }
9392        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
9393        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
9394        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
9395#if MTMERGESOURCE
9396        // MTMTODO : Merge with code from above to eliminate duplication
9397        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9398            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9399            MT_PASS_ATTACHMENT_INFO pass_info;
9400            pass_info.load_op = desc.loadOp;
9401            pass_info.store_op = desc.storeOp;
9402            pass_info.attachment = i;
9403            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
9404        }
9405        // TODO: Maybe fill list and then copy instead of locking
9406        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
9407        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
9408            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
9409        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9410            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9411            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9412                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9413                if (attachment_first_read.count(attachment))
9414                    continue;
9415                attachment_first_read.insert(std::make_pair(attachment, true));
9416                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9417            }
9418            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9419                uint32_t attachment = subpass.pColorAttachments[j].attachment;
9420                if (attachment_first_read.count(attachment))
9421                    continue;
9422                attachment_first_read.insert(std::make_pair(attachment, false));
9423                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9424            }
9425            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9426                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9427                if (attachment_first_read.count(attachment))
9428                    continue;
9429                attachment_first_read.insert(std::make_pair(attachment, false));
9430                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9431            }
9432        }
9433#endif
9434        loader_platform_thread_unlock_mutex(&globalLock);
9435    }
9436    return result;
9437}
9438// Free the renderpass shadow
9439static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
9441        return;
9442    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9443        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9444        delete[] pRenderPassInfo->pAttachments;
9445        if (pRenderPassInfo->pSubpasses) {
9446            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in a block, so just need to
9448                //  find the first non-null one to delete
9449                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9450                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9451                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9452                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9453                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9454                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9455                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9456                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9457                }
9458            }
9459            delete[] pRenderPassInfo->pSubpasses;
9460        }
9461        delete[] pRenderPassInfo->pDependencies;
9462        delete pRenderPassInfo;
9463        delete (*ii).second;
9464    }
9465    my_data->renderPassMap.clear();
9466}
9467
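// At render pass begin time, check each framebuffer attachment's tracked layout against the
// initialLayout declared for it in the render pass; subresources with no tracked layout yet
// are seeded with the declared initialLayout instead.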
9468VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9469    VkBool32 skip_call = VK_FALSE;
9470    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9471    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9472    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9473    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
9474    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9475        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9476                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9477                                                                 "with a different number of attachments.");
9478    }
9479    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9480        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9481        auto image_data = dev_data->imageViewMap.find(image_view);
9482        assert(image_data != dev_data->imageViewMap.end());
9483        const VkImage &image = image_data->second.image;
9484        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
9485        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9486                                             pRenderPassInfo->pAttachments[i].initialLayout};
9487        // TODO: Do not iterate over every possibility - consolidate where possible
9488        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9489            uint32_t level = subRange.baseMipLevel + j;
9490            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9491                uint32_t layer = subRange.baseArrayLayer + k;
9492                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9493                IMAGE_CMD_BUF_LAYOUT_NODE node;
9494                if (!FindLayout(pCB, image, sub, node)) {
9495                    SetLayout(pCB, image, sub, newNode);
9496                    continue;
9497                }
9498                if (newNode.layout != node.layout) {
9499                    skip_call |=
9500                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9501                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9502                                                                    "where the "
9503                                                                    "initial layout is %s and the layout of the attachment at the "
9504                                                                    "start of the render pass is %s. The layouts must match.",
9505                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9506                }
9507            }
9508        }
9509    }
9510    return skip_call;
9511}
9512
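// Record the layout transitions implied by entering the given subpass: each input, color,
// and depth/stencil attachment is moved to the layout named in its attachment reference.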
9513void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
9514    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9515    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9516    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9517    if (render_pass_data == dev_data->renderPassMap.end()) {
9518        return;
9519    }
9520    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9521    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9522    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9523        return;
9524    }
9525    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9526    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        uint32_t attachment = subpass.pInputAttachments[j].attachment;
        // Skip VK_ATTACHMENT_UNUSED references, which would index past the framebuffer's attachments
        if (attachment == VK_ATTACHMENT_UNUSED)
            continue;
        const VkImageView &image_view = framebufferInfo.pAttachments[attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        uint32_t attachment = subpass.pColorAttachments[j].attachment;
        if (attachment == VK_ATTACHMENT_UNUSED)
            continue;
        const VkImageView &image_view = framebufferInfo.pAttachments[attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
    }
9535    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9536        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9537        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9538    }
9539}
9540
9541VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9542    VkBool32 skip_call = VK_FALSE;
9543    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9544        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9545                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9546                             cmd_name.c_str());
9547    }
9548    return skip_call;
9549}
9550
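// At render pass end, move every framebuffer attachment to the finalLayout declared for it
// in the render pass create info.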
9551void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9552    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9553    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9554    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9555    if (render_pass_data == dev_data->renderPassMap.end()) {
9556        return;
9557    }
9558    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9559    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9560    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9561        return;
9562    }
9563    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9564    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9565        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9566        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9567    }
9568}
9569
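// Confirm that pRenderPassBegin->renderArea lies entirely within the bounds of the
// framebuffer being rendered to.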
9570bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9571    bool skip_call = false;
9572    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9573    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9574        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9575        pRenderPassBegin->renderArea.offset.y < 0 ||
9576        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9577        skip_call |= static_cast<bool>(log_msg(
9578            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9579            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9580            "Cannot execute a render pass with renderArea not within the bound of the "
9581            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9582            "height %d.",
9583            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9584            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9585    }
9586    return skip_call;
9587}
9588
9589VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9590vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9591    VkBool32 skipCall = VK_FALSE;
9592    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9593    loader_platform_thread_lock_mutex(&globalLock);
9594    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9595    if (pCB) {
9596        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
#if MTMERGESOURCE
9598            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9599            if (pass_data != dev_data->renderPassMap.end()) {
9600                RENDER_PASS_NODE* pRPNode = pass_data->second;
9601                pRPNode->fb = pRenderPassBegin->framebuffer;
9602                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9603                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9604                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9605                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9606                        if (cb_data != dev_data->commandBufferMap.end()) {
9607                            std::function<VkBool32()> function = [=]() {
9608                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9609                                return VK_FALSE;
9610                            };
9611                            cb_data->second->validate_functions.push_back(function);
9612                        }
9613                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
9614                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9615                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9616                            skipCall |=
9617                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9618                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9619                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9620                                        pRPNode->attachments[i].attachment, attachment_layout);
9621                        }
9622                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9623                        if (cb_data != dev_data->commandBufferMap.end()) {
9624                            std::function<VkBool32()> function = [=]() {
9625                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9626                                return VK_FALSE;
9627                            };
9628                            cb_data->second->validate_functions.push_back(function);
9629                        }
9630                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9631                        if (cb_data != dev_data->commandBufferMap.end()) {
9632                            std::function<VkBool32()> function = [=]() {
9633                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9634                            };
9635                            cb_data->second->validate_functions.push_back(function);
9636                        }
9637                    }
9638                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
9639                        if (cb_data != dev_data->commandBufferMap.end()) {
9640                            std::function<VkBool32()> function = [=]() {
9641                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9642                            };
9643                            cb_data->second->validate_functions.push_back(function);
9644                        }
9645                    }
9646                }
9647            }
9648#endif
9649            skipCall |= static_cast<VkBool32>(VerifyRenderAreaBounds(dev_data, pRenderPassBegin));
9650            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9651            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9652            if (render_pass_data != dev_data->renderPassMap.end()) {
9653                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9654            }
9655            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9656            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9657            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9658            pCB->activeRenderPass = pRenderPassBegin->renderPass;
9659            // This is a shallow copy as that is all that is needed for now
9660            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9661            pCB->activeSubpass = 0;
9662            pCB->activeSubpassContents = contents;
9663            pCB->framebuffer = pRenderPassBegin->framebuffer;
9664            // Connect this framebuffer to this cmdBuffer
9665            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9666        } else {
9667            skipCall |=
9668                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9669                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9670        }
9671    }
9672    loader_platform_thread_unlock_mutex(&globalLock);
9673    if (VK_FALSE == skipCall) {
9674        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9675        loader_platform_thread_lock_mutex(&globalLock);
9676        // This is a shallow copy as that is all that is needed for now
9677        dev_data->renderPassBeginInfo = *pRenderPassBegin;
9678        dev_data->currentSubpass = 0;
9679        loader_platform_thread_unlock_mutex(&globalLock);
9680    }
9681}
9682
9683VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9684    VkBool32 skipCall = VK_FALSE;
9685    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9686    loader_platform_thread_lock_mutex(&globalLock);
9687    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9688    TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
9689    if (pCB) {
9690        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9691        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9692        pCB->activeSubpass++;
9693        pCB->activeSubpassContents = contents;
9694        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9695        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
9696            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
9697                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
9698        }
9699        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9700    }
9701    loader_platform_thread_unlock_mutex(&globalLock);
9702    if (VK_FALSE == skipCall)
9703        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9704}
9705
9706VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
9707    VkBool32 skipCall = VK_FALSE;
9708    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9709    loader_platform_thread_lock_mutex(&globalLock);
9710#if MTMERGESOURCE
9711    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9712    if (cb_data != dev_data->commandBufferMap.end()) {
9713        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
9714        if (pass_data != dev_data->renderPassMap.end()) {
9715            RENDER_PASS_NODE* pRPNode = pass_data->second;
9716            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9717                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9718                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
9719                    if (cb_data != dev_data->commandBufferMap.end()) {
9720                        std::function<VkBool32()> function = [=]() {
9721                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9722                            return VK_FALSE;
9723                        };
9724                        cb_data->second->validate_functions.push_back(function);
9725                    }
9726                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9727                    if (cb_data != dev_data->commandBufferMap.end()) {
9728                        std::function<VkBool32()> function = [=]() {
9729                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9730                            return VK_FALSE;
9731                        };
9732                        cb_data->second->validate_functions.push_back(function);
9733                    }
9734                }
9735            }
9736        }
9737    }
9738#endif
9739    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9740    TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
9741    if (pCB) {
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9743        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9744        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9745        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
9746        pCB->activeRenderPass = 0;
9747        pCB->activeSubpass = 0;
9748    }
9749    loader_platform_thread_unlock_mutex(&globalLock);
9750    if (VK_FALSE == skipCall)
9751        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9752}
9753
9754bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9755                                 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
9756    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9757                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9758                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                   " that is not compatible with the current render pass %" PRIx64 ". "
                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9761                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9762                   msg);
9763}
9764
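// Render pass compatibility check for a single attachment pair: out-of-range references are
// treated as VK_ATTACHMENT_UNUSED; two used attachments must agree on format and sample
// count, and, when is_multi is set, on flags as well.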
9765bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9766                                     uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9767                                     uint32_t secondaryAttach, bool is_multi) {
9768    bool skip_call = false;
9769    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9770    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9771    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9772        primaryAttach = VK_ATTACHMENT_UNUSED;
9773    }
9774    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9775        secondaryAttach = VK_ATTACHMENT_UNUSED;
9776    }
9777    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9778        return skip_call;
9779    }
9780    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9781        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9782                                                 secondaryAttach, "The first is unused while the second is not.");
9783        return skip_call;
9784    }
9785    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9786        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9787                                                 secondaryAttach, "The second is unused while the first is not.");
9788        return skip_call;
9789    }
9790    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9791        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9792        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9793                                                 secondaryAttach, "They have different formats.");
9794    }
9795    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9796        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9797        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9798                                                 secondaryAttach, "They have different samples.");
9799    }
9800    if (is_multi &&
9801        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9802            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9803        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9804                                                 secondaryAttach, "They have different flags.");
9805    }
9806    return skip_call;
9807}

bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                  VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
                                                     secondaryPass, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
                                                     secondaryPass, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
                                                     secondaryPass, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
    return skip_call;
}

bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
    bool skip_call = false;
    // Early exit if renderPass objects are identical (and therefore compatible)
    if (primaryPass == secondaryPass)
        return skip_call;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)primaryBuffer, (uint64_t)(primaryPass));
        return skip_call;
    }
    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                             " that is not compatible with the current render pass %" PRIx64 ". "
                             "They have a different number of subpasses.",
                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
        return skip_call;
    }
    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
        skip_call |=
            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
    }
    return skip_call;
}
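
// The checks above mirror the "Render Pass Compatibility" rules in the Vulkan spec:
// corresponding attachments must match in format and sample count (and in flags when
// more than one subpass is involved), and both passes must have the same subpass count.
// Illustrative (application-side, hypothetical) sketch: these two attachment
// descriptions remain compatible even though their load ops differ, since load/store
// ops do not factor into compatibility.
//
//     VkAttachmentDescription a = {};
//     a.format = VK_FORMAT_B8G8R8A8_UNORM;
//     a.samples = VK_SAMPLE_COUNT_1_BIT;
//     a.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//     VkAttachmentDescription b = a;          // used by a second render pass
//     b.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;  // differs, but compatibility is unaffected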

bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->framebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
        }
        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
        if (fb_data == dev_data->frameBufferMap.end()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                                                                          "which has invalid framebuffer %" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
    }
    return skip_call;
}

bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skipCall = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics are being queried, so the "
                        "secondary command buffer's inherited pipelineStatistics must be a subset of the bits enabled on the "
                        "query pool.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 " of type %d, but a query of that type has been started on "
                        "secondary Cmd Buffer %p.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
        }
    }
    return skipCall;
}
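
// Illustrative (application-side, hypothetical) sketch of the constraint enforced
// above: when a pipeline-statistics query is active on the primary, the secondary's
// inherited pipelineStatistics flags must be a subset of those the pool was created
// with. Matching them exactly is the simplest way to satisfy the check.
//
//     VkQueryPoolCreateInfo qpci = {};
//     qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
//     qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
//     qpci.queryCount = 1;
//     qpci.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT |
//                               VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.pipelineStatistics = qpci.pipelineStatistics; // subset relation trivially holds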

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBufferCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
                continue; // the checks below dereference pSubCB
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                    (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if it has the continue bit set
                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
                }
                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution starting when they are recorded into a primary
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that a non-simultaneous secondary cmd buffer forces the primary to be treated as non-simultaneous
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                                          "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
                            "flight because inherited queries are not "
                            "supported on this device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
}
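
// Illustrative (application-side, hypothetical) sketch of the usage validated above:
// a secondary command buffer executed inside a render pass must be begun with the
// RENDER_PASS_CONTINUE_BIT and a compatible render pass (and, optionally, the exact
// framebuffer) in its inheritance info.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = renderPass;   // must be compatible with the active pass
//     inherit.subpass = 0;
//     inherit.framebuffer = framebuffer; // VK_NULL_HANDLE is also legal here
//
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondaryCB, &begin);
//     // ... record draw commands ...
//     vkEndCommandBuffer(secondaryCB);
//
//     // Later, inside a render pass begun with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS:
//     vkCmdExecuteCommands(primaryCB, 1, &secondaryCB);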

VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto mem_data = dev_data->memObjMap.find(mem);
    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
        std::vector<VkImageLayout> layouts;
        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
            for (auto layout : layouts) {
                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
                                                                                         "GENERAL or PREINITIALIZED are supported.",
                                         string_VkImageLayout(layout));
                }
            }
        }
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkBool32 skip_call = VK_FALSE;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
    if (pMemObj) {
        pMemObj->valid = true;
        if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call =
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
        }
    }
    skip_call |= validateMemRange(dev_data, mem, offset, size);
    storeMemRanges(dev_data, mem, offset, size);
#endif
    skip_call |= ValidateMapImageLayouts(device, mem);
    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_FALSE == skip_call) {
        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
#if MTMERGESOURCE
        loader_platform_thread_lock_mutex(&globalLock);
        initializeAndTrackMemory(dev_data, mem, size, ppData);
        loader_platform_thread_unlock_mutex(&globalLock);
#endif
    }
    return result;
}
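
// Illustrative (application-side, hypothetical) sketch of the rule enforced above:
// only allocations from a HOST_VISIBLE memory type may be mapped.
//
//     VkPhysicalDeviceMemoryProperties props;
//     vkGetPhysicalDeviceMemoryProperties(physicalDevice, &props);
//     uint32_t typeIndex = allocInfo.memoryTypeIndex; // from the vkAllocateMemory call
//     if (props.memoryTypes[typeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
//         void *ptr = NULL;
//         vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &ptr);
//         // ... write data, flush if the type is not HOST_COHERENT ...
//         vkUnmapMemory(device, memory);
//     }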

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= deleteMemRanges(my_data, mem);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        my_data->device_dispatch_table->UnmapMemory(device, mem);
    }
}

VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
                                const VkMappedMemoryRange *pMemRanges) {
    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
                    "(" PRINTF_SIZE_T_SPECIFIER ").",
                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
            }
            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
                 (pMemRanges[i].offset + pMemRanges[i].size))) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
                                                                 ") exceeds the Memory Object's upper-bound "
                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
            }
        }
    }
    return skipCall;
}

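// The routine below assumes the shadow allocation set up by initializeAndTrackMemory()
// for non-coherent mappings is laid out as follows (inferred from the checks it performs):
//
//     [ guard band: size/2 bytes ][ user data: size bytes ][ guard band: size/2 bytes ]
//
// Both guard bands are pre-filled with NoncoherentMemoryFillValue; any other byte found
// there means the application wrote outside its mapped range. Only the middle region is
// copied back to the driver's real pointer on flush.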
VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
                                                  const VkMappedMemoryRange *pMemRanges) {
    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.pData) {
                VkDeviceSize size = mem_element->second.memRange.size;
                VkDeviceSize half_size = (size / 2);
                char *data = static_cast<char *>(mem_element->second.pData);
                // Scan the leading guard band for writes before the start of the mapped range
                for (VkDeviceSize j = 0; j < half_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                // Scan the trailing guard band for writes past the end of the mapped range
                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                // Copy only the real payload (between the guard bands) back to the driver's pointer
                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
            }
        }
    }
    return skipCall;
}

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}
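
// Illustrative (application-side, hypothetical) sketch of the flush path validated
// above: after writing through a non-coherent mapping, the written range must be
// flushed so the device observes the data.
//
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = memory;
//     range.offset = 0;           // must lie within the currently mapped range
//     range.size = VK_WHOLE_SIZE; // otherwise a multiple of nonCoherentAtomSize
//     vkFlushMappedMemoryRanges(device, 1, &range);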

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}
#endif

VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    // Track objects tied to memory
    uint64_t image_handle = (uint64_t)(image);
    skipCall =
        set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
    {
        VkMemoryRequirements memRequirements;
        // Query through the dispatch table rather than the public entry point to avoid re-entering the layer chain
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    }
    print_mem_list(dev_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_FALSE == skipCall) {
        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
        VkMemoryRequirements memRequirements;
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->memObjMap[mem].image = image;
        dev_data->imageMap[image].mem = mem;
        dev_data->imageMap[image].memOffset = memoryOffset;
        dev_data->imageMap[image].memSize = memRequirements.size;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
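
// Illustrative (application-side, hypothetical) sketch of the bind sequence this
// entry point tracks; chooseMemoryType() stands in for an app-defined helper.
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     VkMemoryAllocateInfo alloc = {};
//     alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
//     alloc.allocationSize = reqs.size;
//     alloc.memoryTypeIndex = chooseMemoryType(reqs.memoryTypeBits);
//     VkDeviceMemory memory;
//     vkAllocateMemory(device, &alloc, NULL, &memory);
//     vkBindImageMemory(device, image, memory, 0); // offset must honor reqs.alignment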

VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->eventMap[event].needsSignaled = false;
    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
    loader_platform_thread_unlock_mutex(&globalLock);
    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skip_call = VK_FALSE;
#if MTMERGESOURCE
    //MTMTODO : Merge this code with the checks below
    loader_platform_thread_lock_mutex(&globalLock);

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        // Validate semaphore state (use j here to avoid shadowing the outer loop index)
        for (uint32_t j = 0; j < bindInfo->waitSemaphoreCount; j++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[j];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
                    skip_call =
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
            }
        }
        for (uint32_t j = 0; j < bindInfo->signalSemaphoreCount; j++) {
            VkSemaphore sem = bindInfo->pSignalSemaphores[j];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
                    skip_call =
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
            }
        }
    }

    print_mem_list(dev_data, queue);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);

    // Call down the chain (rather than returning early) so the semaphore state update below still runs
    if (VK_FALSE == skip_call)
        result = dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
#if MTMERGESOURCE
    // Update semaphore state
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[i];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif

    return result;
}
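
// Illustrative (application-side, hypothetical) sketch of an opaque sparse image bind,
// the shape of the data walked by the loops above:
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = reqs.size; // from vkGetImageMemoryRequirements on a sparse image
//     bind.memory = memory;
//     bind.memoryOffset = 0;
//
//     VkSparseImageOpaqueMemoryBindInfo opaque = {};
//     opaque.image = image;
//     opaque.bindCount = 1;
//     opaque.pBinds = &bind;
//
//     VkBindSparseInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     info.imageOpaqueBindCount = 1;
//     info.pImageOpaqueBinds = &opaque;
//     vkQueueBindSparse(queue, 1, &info, VK_NULL_HANDLE);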

VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaled = 0;
        sNode->queue = VK_NULL_HANDLE;
        sNode->in_use.store(0);
        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].in_use.store(0);
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    if (VK_SUCCESS == result) {
        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    loader_platform_thread_lock_mutex(&globalLock);
    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
        if (swapchain_data->second->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->second->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
#if MTMERGESOURCE
                // Accumulate (rather than overwrite) the result across all swapchain images
                skipCall |= clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
#endif
            }
        }
        delete swapchain_data->second;
        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount)
            return result;
        loader_platform_thread_lock_mutex(&globalLock);
        const size_t count = *pCount;
        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
        if (!swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(%" PRIu64 ") returned mismatching data",
                        (uint64_t)(swapchain));
            }
        }
        for (uint32_t i = 0; i < *pCount; ++i) {
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_node->createInfo.imageFormat;
            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            swapchain_node->images.push_back(pSwapchainImages[i]);
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            dev_data->imageLayoutMap[subpair] = image_layout_node;
            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
        }
        if (!swapchain_node->images.empty()) {
            for (auto image : swapchain_node->images) {
                // Add image object binding, then insert the new Mem Object and then bind it to created image
#if MTMERGESOURCE
                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                       &swapchain_node->createInfo);
#endif
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
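
// Illustrative (application-side, hypothetical) sketch of the standard two-call idiom
// this entry point services:
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, NULL);
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());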

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
            }
        }
        VkDeviceMemory mem;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but are in %s",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
#if MTMERGESOURCE
    // Guard against a NULL pPresentInfo here as well, matching the check above
    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }
    loader_platform_thread_lock_mutex(&globalLock);
    // FIXME/TODO: Need to add tracking code for the "fence" parameter
    dev_data->semaphoreMap[semaphore].signaled = 1;
    loader_platform_thread_unlock_mutex(&globalLock);
    return result;
}
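
// Illustrative (application-side, hypothetical) sketch of the acquire/present handshake
// whose semaphore state is tracked above and in vkQueuePresentKHR():
//
//     uint32_t imageIndex;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquireSem, VK_NULL_HANDLE, &imageIndex);
//     // ... submit work that waits on acquireSem and signals renderSem ...
//     VkPresentInfoKHR present = {};
//     present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &renderSem;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &imageIndex;
//     vkQueuePresentKHR(queue, &present);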
10623
10624VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10625vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10626                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10627    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10628    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10629    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10630    if (VK_SUCCESS == res) {
10631        loader_platform_thread_lock_mutex(&globalLock);
10632        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
10633        loader_platform_thread_unlock_mutex(&globalLock);
10634    }
10635    return res;
10636}
10637
10638VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
10639                                                                           VkDebugReportCallbackEXT msgCallback,
10640                                                                           const VkAllocationCallbacks *pAllocator) {
10641    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10642    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10643    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10644    loader_platform_thread_lock_mutex(&globalLock);
10645    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10646    loader_platform_thread_unlock_mutex(&globalLock);
10647}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}
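/* Applications and other layers can also inject their own messages into the
 * debug-report chain through this entry point, e.g. (illustrative sketch):
 *
 *     vkDebugReportMessageEXT(instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
 *                             VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 0, 0,
 *                             "APP", "frame begin");
 */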
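// Device-level dispatch: entry points this layer intercepts are matched by
// name and returned directly; anything unrecognized falls through to the next
// layer's GetDeviceProcAddr at the bottom of this function.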
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkDestroyDevice"))
        return (PFN_vkVoidFunction)vkDestroyDevice;
    if (!strcmp(funcName, "vkQueueSubmit"))
        return (PFN_vkVoidFunction)vkQueueSubmit;
    if (!strcmp(funcName, "vkWaitForFences"))
        return (PFN_vkVoidFunction)vkWaitForFences;
    if (!strcmp(funcName, "vkGetFenceStatus"))
        return (PFN_vkVoidFunction)vkGetFenceStatus;
    if (!strcmp(funcName, "vkQueueWaitIdle"))
        return (PFN_vkVoidFunction)vkQueueWaitIdle;
    if (!strcmp(funcName, "vkDeviceWaitIdle"))
        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
    if (!strcmp(funcName, "vkGetDeviceQueue"))
        return (PFN_vkVoidFunction)vkGetDeviceQueue;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkDestroyFence"))
        return (PFN_vkVoidFunction)vkDestroyFence;
    if (!strcmp(funcName, "vkResetFences"))
        return (PFN_vkVoidFunction)vkResetFences;
    if (!strcmp(funcName, "vkDestroySemaphore"))
        return (PFN_vkVoidFunction)vkDestroySemaphore;
    if (!strcmp(funcName, "vkDestroyEvent"))
        return (PFN_vkVoidFunction)vkDestroyEvent;
    if (!strcmp(funcName, "vkDestroyQueryPool"))
        return (PFN_vkVoidFunction)vkDestroyQueryPool;
    if (!strcmp(funcName, "vkDestroyBuffer"))
        return (PFN_vkVoidFunction)vkDestroyBuffer;
    if (!strcmp(funcName, "vkDestroyBufferView"))
        return (PFN_vkVoidFunction)vkDestroyBufferView;
    if (!strcmp(funcName, "vkDestroyImage"))
        return (PFN_vkVoidFunction)vkDestroyImage;
    if (!strcmp(funcName, "vkDestroyImageView"))
        return (PFN_vkVoidFunction)vkDestroyImageView;
    if (!strcmp(funcName, "vkDestroyShaderModule"))
        return (PFN_vkVoidFunction)vkDestroyShaderModule;
    if (!strcmp(funcName, "vkDestroyPipeline"))
        return (PFN_vkVoidFunction)vkDestroyPipeline;
    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
    if (!strcmp(funcName, "vkDestroySampler"))
        return (PFN_vkVoidFunction)vkDestroySampler;
    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
    if (!strcmp(funcName, "vkDestroyFramebuffer"))
        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
    if (!strcmp(funcName, "vkDestroyRenderPass"))
        return (PFN_vkVoidFunction)vkDestroyRenderPass;
    if (!strcmp(funcName, "vkCreateBuffer"))
        return (PFN_vkVoidFunction)vkCreateBuffer;
    if (!strcmp(funcName, "vkCreateBufferView"))
        return (PFN_vkVoidFunction)vkCreateBufferView;
    if (!strcmp(funcName, "vkCreateImage"))
        return (PFN_vkVoidFunction)vkCreateImage;
    if (!strcmp(funcName, "vkCreateImageView"))
        return (PFN_vkVoidFunction)vkCreateImageView;
    if (!strcmp(funcName, "vkCreateFence"))
        return (PFN_vkVoidFunction)vkCreateFence;
    if (!strcmp(funcName, "vkCreatePipelineCache"))
        return (PFN_vkVoidFunction)vkCreatePipelineCache;
    if (!strcmp(funcName, "vkDestroyPipelineCache"))
        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
    if (!strcmp(funcName, "vkGetPipelineCacheData"))
        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
    if (!strcmp(funcName, "vkMergePipelineCaches"))
        return (PFN_vkVoidFunction)vkMergePipelineCaches;
    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
    if (!strcmp(funcName, "vkCreateComputePipelines"))
        return (PFN_vkVoidFunction)vkCreateComputePipelines;
    if (!strcmp(funcName, "vkCreateSampler"))
        return (PFN_vkVoidFunction)vkCreateSampler;
    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
    if (!strcmp(funcName, "vkCreatePipelineLayout"))
        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
    if (!strcmp(funcName, "vkCreateDescriptorPool"))
        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
    if (!strcmp(funcName, "vkResetDescriptorPool"))
        return (PFN_vkVoidFunction)vkResetDescriptorPool;
    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
    if (!strcmp(funcName, "vkFreeDescriptorSets"))
        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
    if (!strcmp(funcName, "vkCreateCommandPool"))
        return (PFN_vkVoidFunction)vkCreateCommandPool;
    if (!strcmp(funcName, "vkDestroyCommandPool"))
        return (PFN_vkVoidFunction)vkDestroyCommandPool;
    if (!strcmp(funcName, "vkResetCommandPool"))
        return (PFN_vkVoidFunction)vkResetCommandPool;
    if (!strcmp(funcName, "vkCreateQueryPool"))
        return (PFN_vkVoidFunction)vkCreateQueryPool;
    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
    if (!strcmp(funcName, "vkFreeCommandBuffers"))
        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
    if (!strcmp(funcName, "vkBeginCommandBuffer"))
        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
    if (!strcmp(funcName, "vkEndCommandBuffer"))
        return (PFN_vkVoidFunction)vkEndCommandBuffer;
    if (!strcmp(funcName, "vkResetCommandBuffer"))
        return (PFN_vkVoidFunction)vkResetCommandBuffer;
    if (!strcmp(funcName, "vkCmdBindPipeline"))
        return (PFN_vkVoidFunction)vkCmdBindPipeline;
    if (!strcmp(funcName, "vkCmdSetViewport"))
        return (PFN_vkVoidFunction)vkCmdSetViewport;
    if (!strcmp(funcName, "vkCmdSetScissor"))
        return (PFN_vkVoidFunction)vkCmdSetScissor;
    if (!strcmp(funcName, "vkCmdSetLineWidth"))
        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
    if (!strcmp(funcName, "vkCmdSetDepthBias"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
    if (!strcmp(funcName, "vkCmdSetStencilReference"))
        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
    if (!strcmp(funcName, "vkCmdDraw"))
        return (PFN_vkVoidFunction)vkCmdDraw;
    if (!strcmp(funcName, "vkCmdDrawIndexed"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
    if (!strcmp(funcName, "vkCmdDrawIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
    if (!strcmp(funcName, "vkCmdDispatch"))
        return (PFN_vkVoidFunction)vkCmdDispatch;
    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
    if (!strcmp(funcName, "vkCmdCopyBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
    if (!strcmp(funcName, "vkCmdCopyImage"))
        return (PFN_vkVoidFunction)vkCmdCopyImage;
    if (!strcmp(funcName, "vkCmdBlitImage"))
        return (PFN_vkVoidFunction)vkCmdBlitImage;
    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;

    if (dev == NULL)
        return NULL;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    // WSI entry points are only intercepted if the swapchain extension was
    // enabled when the device was created.
    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}
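/* For reference, the loader (or an application) resolves device entry points
 * through this function, e.g.:
 *
 *     PFN_vkQueueSubmit pfnQueueSubmit =
 *         (PFN_vkQueueSubmit)vkGetDeviceProcAddr(device, "vkQueueSubmit");
 *
 * With this layer enabled, the pointer returned is the wrapper defined above
 * rather than the driver's implementation.
 */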
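// Instance-level dispatch: instance entry points this layer intercepts are
// matched first, then the debug-report machinery gets a chance to satisfy the
// query, and anything else falls through to the next layer's
// GetInstanceProcAddr.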
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
#endif
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;

    if (instance == NULL)
        return NULL;

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);

    // Let the debug-report helpers satisfy the query before passing it down.
    PFN_vkVoidFunction fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}
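/* For reference, extension entry points such as the debug-report functions
 * above are bootstrapped through this function, e.g.:
 *
 *     PFN_vkCreateDebugReportCallbackEXT pfnCreateCallback =
 *         (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(
 *             instance, "vkCreateDebugReportCallbackEXT");
 */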