core_validation.cpp revision 72d66f0c1639cbaca92459498452d06db32d7aef
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGE 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <string>
#include <iostream>
#include <algorithm>
#include <list>
#include <SPIRV/spirv.hpp>
#include <set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

#if MTMERGE
// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
#endif
// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    VkBool32 wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;
struct render_pass;

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGE
// MTMERGE - stuff pulled directly from MT
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    unordered_map<VkFramebuffer, MT_FB_INFO> fbMap;
    unordered_map<VkRenderPass, MT_PASS_INFO> passMap;
    unordered_map<VkDescriptorSet, MT_DESCRIPTOR_SET_INFO> descriptorSetMap;
    // Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
// MTMERGE - End of MT stuff
#endif
    devExts device_extensions;
    vector<VkQueue> queues; // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
// MTMERGE - added a couple of fields to constructor initializer
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGE
        currentFenceId(1),
#endif
        device_extensions() {}
};

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
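// Each SPIR-V instruction packs its total word count into the high 16 bits of its
// first word and its opcode into the low 16 bits; len() and opcode() below decode
// exactly that. Illustrative use (an assumed caller pattern, not from this file):
//   for (auto insn : *module) {
//       if (insn.opcode() == spv::OpEntryPoint) { /* inspect insn.word(n) */ }
//   }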
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
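    /* Note: the SPIR-V header is exactly five words (magic number, version,
     * generator, id bound, schema), so the first instruction starts at word 5. */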
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
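// Hedged usage sketch for get_def(): resolving a pointer type to its pointee,
// assuming `module` is a populated shader_module (names are illustrative):
//   auto ptr_type = module.get_def(some_id);          // an OpTypePointer insn
//   auto pointee  = module.get_def(ptr_type.word(3)); // word(3) is the pointee <id>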

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#define MAX_TID 513
static loader_platform_thread_id g_tidMapping[MAX_TID] = {0};
static uint32_t g_maxTID = 0;
#if MTMERGE
// MTMERGE - start of direct pull
static VkPhysicalDeviceMemoryProperties memProps;

static void clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF

static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    MT_OBJ_BINDING_INFO *retValue = NULL;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageBindingMap.find(handle);
        if (it != my_data->imageBindingMap.end())
            return &(*it).second;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferBindingMap.find(handle);
        if (it != my_data->bufferBindingMap.end())
            return &(*it).second;
        break;
    }
    default:
        break;
    }
    return retValue;
}
// MTMERGE - end section
#endif
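// Explicit instantiation so the layer framework's layer-data lookup helper is
// emitted for this layer's layer_data type in this translation unit.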
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGE
static void delete_queue_info_list(layer_data *my_data) {
    // Process queue list, cleaning up each entry before deleting
    my_data->queueMap.clear();
}

// Delete CBInfo from container and clear mem references to CB
static void delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(my_data, cb);
    // Delete the CBInfo entry
    my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
    my_data->commandBufferMap.erase(cb);
}

static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                    const VkDeviceMemory mem) {
    switch (type) {
    // Buffers and images are unique in that their CreateInfo is stored in the container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    default:
        break;
    }
}

static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                   const void *pCreateInfo) {
    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
    switch (type) {
    // Buffers and images are unique in that their CreateInfo is stored in the container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
        break;
    }
    // Swapchains are a special case: use my_data->imageBindingMap, but copy in the
    // VkSwapchainCreateInfoKHR's usage flags and set mem to a unique key. This is used by
    // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
        pCI->valid = false;
        pCI->create_info.image.usage = static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo)->imageUsage;
        break;
    }
    default:
        break;
    }
}

// Add a fence to our list of fences/fenceIds and validate its state
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    *fenceId = my_data->currentFenceId++;

    // If an application-provided fence was given, track it and validate its state
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        // Validate that fence is in UNSIGNALED state
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}

// Remove a fenceInfo from our list of fences/fenceIds
static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
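// For example (illustrative values): a call with desired = VK_BUFFER_USAGE_TRANSFER_SRC_BIT
// and strict = VK_TRUE only passes if that exact bit was set at creation, while
// strict = VK_FALSE accepts any overlap between actual and desired.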
static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    VkBool32 correct_usage = VK_FALSE;
    VkBool32 skipCall = VK_FALSE;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].refCount = 0;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                         VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo && !pBindInfo->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo) {
            pBindInfo->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                                  const char *apiName) {
    VkBool32 skipCall = VK_FALSE;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            // Search for cmd buffer object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pMemInfo->pCommandBufferBindings.size() > 0) {
                for (list<VkCommandBuffer>::iterator it = pMemInfo->pCommandBufferBindings.begin();
                     it != pMemInfo->pCommandBufferBindings.end(); ++it) {
                    if ((*it) == cb) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                pMemInfo->pCommandBufferBindings.push_front(cb);
                pMemInfo->refCount++;
            }
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                // Search for memory object in cmd buffer's reference list
                VkBool32 found = VK_FALSE;
                if (pCBNode->pMemObjList.size() > 0) {
                    for (auto it = pCBNode->pMemObjList.begin(); it != pCBNode->pMemObjList.end(); ++it) {
                        if ((*it) == mem) {
                            found = VK_TRUE;
                            break;
                        }
                    }
                }
                // If not present, add to list
                if (found == VK_FALSE) {
                    pCBNode->pMemObjList.push_front(mem);
                }
            }
        }
    }
    return skipCall;
}

// Free bindings related to CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);

    if (pCBNode) {
        if (pCBNode->pMemObjList.size() > 0) {
            list<VkDeviceMemory> mem_obj_list = pCBNode->pMemObjList;
            for (list<VkDeviceMemory>::iterator it = mem_obj_list.begin(); it != mem_obj_list.end(); ++it) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, *it);
                if (pInfo) {
                    pInfo->pCommandBufferBindings.remove(cb);
                    pInfo->refCount--;
                }
            }
            pCBNode->pMemObjList.clear();
        }
        pCBNode->activeDescriptorSets.clear();
        pCBNode->validate_functions.clear();
    }
}

// Delete the entire CB list
static void delete_cmd_buf_info_list(layer_data *my_data) {
    for (auto &cb_node : my_data->commandBufferMap) {
        clear_cmd_buf_and_mem_references(my_data, cb_node.first);
    }
    my_data->commandBufferMap.clear();
}

// For given MemObjInfo, report Obj & CB bindings
static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    VkBool32 skipCall = VK_FALSE;
    size_t cmdBufRefCount = pMemObjInfo->pCommandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->pObjBindings.size();

    if (cmdBufRefCount > 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));

        for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin();
             it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
            // TODO : CommandBuffer should be source Obj here
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pCommandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    it->handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pObjBindings.clear();
    }
    return skipCall;
}

static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
    bool skipCall = false;
    *complete = true;

    if (pCBNode) {
        if (pCBNode->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBNode->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            bool commandBufferComplete = false;
            assert(pInfo->object != VK_NULL_HANDLE);
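            // Note: clear_cmd_buf_and_mem_references() below removes *it from
            // pCommandBufferBindings, invalidating the iterator, so a copy is
            // advanced first to keep the traversal valid.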
            list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
            list<VkCommandBuffer>::iterator temp;
            while (pInfo->pCommandBufferBindings.size() > 0 && it != pInfo->pCommandBufferBindings.end()) {
                skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
                if (commandBufferComplete) {
                    temp = it;
                    ++temp;
                    clear_cmd_buf_and_mem_references(dev_data, *it);
                    it = temp;
                } else {
                    ++it;
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (0 != pInfo->refCount) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs 3 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Decrement refCount for MemObjInfo
// 3. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
    if (pObjBindInfo) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
        // TODO : Make sure this is a reasonable way to reset mem binding
        pObjBindInfo->mem = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object from that
            // memory object's list, decrement the memObj's refCount, and set the object's memory
            // binding pointer to NULL.
            VkBool32 clearSucceeded = VK_FALSE;
            for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
                if ((it->handle == handle) && (it->type == type)) {
                    pMemObjInfo->refCount--;
                    pMemObjInfo->pObjBindings.erase(it);
                    clearSucceeded = VK_TRUE;
                    break;
                }
            }
            if (VK_FALSE == clearSucceeded) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
//  device is required for error logging, need a dispatchable
//  object for that.
static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                // TODO : Need to track mem binding for obj and report conflict here
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pMemInfo->pObjBindings.push_front(oht);
                    pMemInfo->refCount++;
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                    pObjBindInfo->mem = mem;
                }
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
            if (pInfo) {
                // Search for object in memory object's binding list
                VkBool32 found = VK_FALSE;
                if (pInfo->pObjBindings.size() > 0) {
                    for (auto it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                        if (((*it).handle == handle) && ((*it).type == type)) {
                            found = VK_TRUE;
                            break;
                        }
                    }
                }
                // If not present, add to list
                if (found == VK_FALSE) {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pInfo->pObjBindings.push_front(oht);
                    pInfo->refCount++;
                }
                // Need to set mem binding for this object
                pObjBindInfo->mem = mem;
            }
        }
    }
    return skipCall;
}

template <typename T>
void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
                              const char *objectStr) {
    for (auto const &element : objectName) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
    }
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    VkBool32 skipCall = VK_FALSE;
    *mem = VK_NULL_HANDLE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
    if (pObjBindInfo) {
        if (pObjBindInfo->mem) {
            *mem = pObjBindInfo->mem;
        } else {
            skipCall =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
        }
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: %u", pInfo->refCount);
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->pObjBindings.size());
        if (pInfo->pObjBindings.size() > 0) {
            for (list<MT_OBJ_HANDLE_TYPE>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, it->handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->pCommandBufferBindings.size());
        if (pInfo->pCommandBufferBindings.size() > 0) {
            for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
                 it != pInfo->pCommandBufferBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", (*it));
            }
        }
    }
}

static void printCBList(layer_data *my_data, void *dispObj) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->pMemObjList.empty())
            continue;
        for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)(*it));
        }
    }
}

#endif

// Map actual TID to an index value and return that index
//  This keeps TIDs in range from 0-MAX_TID and simplifies compares between runs
static uint32_t getTIDIndex() {
    loader_platform_thread_id tid = loader_platform_get_thread_id();
    for (uint32_t i = 0; i < g_maxTID; i++) {
        if (tid == g_tidMapping[i])
            return i;
    }
    // Don't yet have mapping, set it and return newly set index
    uint32_t retVal = (uint32_t)g_maxTID;
    assert(g_maxTID < MAX_TID); // check capacity before writing, not after
    g_tidMapping[g_maxTID++] = tid;
    return retVal;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}
1140
1141// SPIRV utility functions
1142static void build_def_index(shader_module *module) {
1143    for (auto insn : *module) {
1144        switch (insn.opcode()) {
1145        /* Types */
1146        case spv::OpTypeVoid:
1147        case spv::OpTypeBool:
1148        case spv::OpTypeInt:
1149        case spv::OpTypeFloat:
1150        case spv::OpTypeVector:
1151        case spv::OpTypeMatrix:
1152        case spv::OpTypeImage:
1153        case spv::OpTypeSampler:
1154        case spv::OpTypeSampledImage:
1155        case spv::OpTypeArray:
1156        case spv::OpTypeRuntimeArray:
1157        case spv::OpTypeStruct:
1158        case spv::OpTypeOpaque:
1159        case spv::OpTypePointer:
1160        case spv::OpTypeFunction:
1161        case spv::OpTypeEvent:
1162        case spv::OpTypeDeviceEvent:
1163        case spv::OpTypeReserveId:
1164        case spv::OpTypeQueue:
1165        case spv::OpTypePipe:
1166            module->def_index[insn.word(1)] = insn.offset();
1167            break;
1168
1169        /* Fixed constants */
1170        case spv::OpConstantTrue:
1171        case spv::OpConstantFalse:
1172        case spv::OpConstant:
1173        case spv::OpConstantComposite:
1174        case spv::OpConstantSampler:
1175        case spv::OpConstantNull:
1176            module->def_index[insn.word(2)] = insn.offset();
1177            break;
1178
1179        /* Specialization constants */
1180        case spv::OpSpecConstantTrue:
1181        case spv::OpSpecConstantFalse:
1182        case spv::OpSpecConstant:
1183        case spv::OpSpecConstantComposite:
1184        case spv::OpSpecConstantOp:
1185            module->def_index[insn.word(2)] = insn.offset();
1186            break;
1187
1188        /* Variables */
1189        case spv::OpVariable:
1190            module->def_index[insn.word(2)] = insn.offset();
1191            break;
1192
1193        /* Functions */
1194        case spv::OpFunction:
1195            module->def_index[insn.word(2)] = insn.offset();
1196            break;
1197
1198        default:
1199            /* We don't care about any other defs for now. */
1200            break;
1201        }
1202    }
1203}
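// Illustrative sketch (the ids and instructions are made up): given a module containing
//     %7 = OpTypeFloat 32
//     %8 = OpConstant %7 1.0
// build_def_index records def_index[7] and def_index[8] as the word offsets of those two
// instructions, so lookups like src->get_def(id) can jump straight to a result id's
// defining instruction instead of rescanning the whole module.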
1204
1205static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1206    for (auto insn : *src) {
1207        if (insn.opcode() == spv::OpEntryPoint) {
1208            auto entrypointName = (char const *)&insn.word(3);
1209            auto entrypointStageBits = 1u << insn.word(1);
1210
1211            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1212                return insn;
1213            }
1214        }
1215    }
1216
1217    return src->end();
1218}
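// Hedged usage sketch (pStage is a hypothetical VkPipelineShaderStageCreateInfo):
//     auto entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
//     if (entrypoint == module->end()) { /* no matching OpEntryPoint */ }
// The mask test works because OpEntryPoint's word(1) is an execution model
// (Vertex=0, TessControl=1, TessEval=2, Geometry=3, Fragment=4, GLCompute=5)
// and 1u << model lines up with the VkShaderStageFlagBits bit positions.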
1219
1220bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t const *words = (uint32_t const *)pCreateInfo->pCode;
1222    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
1223
1224    /* Just validate that the header makes sense. */
1225    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
1226}
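// The five words checked above are the fixed SPIR-V module header:
//     word 0: magic number (0x07230203)
//     word 1: version number
//     word 2: generator's magic number
//     word 3: id bound
//     word 4: schema (reserved, zero)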
1227
1228static char const *storage_class_name(unsigned sc) {
1229    switch (sc) {
1230    case spv::StorageClassInput:
1231        return "input";
1232    case spv::StorageClassOutput:
1233        return "output";
1234    case spv::StorageClassUniformConstant:
1235        return "const uniform";
1236    case spv::StorageClassUniform:
1237        return "uniform";
1238    case spv::StorageClassWorkgroup:
1239        return "workgroup local";
1240    case spv::StorageClassCrossWorkgroup:
1241        return "workgroup global";
1242    case spv::StorageClassPrivate:
1243        return "private global";
1244    case spv::StorageClassFunction:
1245        return "function";
1246    case spv::StorageClassGeneric:
1247        return "generic";
1248    case spv::StorageClassAtomicCounter:
1249        return "atomic counter";
1250    case spv::StorageClassImage:
1251        return "image";
1252    case spv::StorageClassPushConstant:
1253        return "push constant";
1254    default:
1255        return "unknown";
1256    }
1257}
1258
1259/* get the value of an integral constant */
1260unsigned get_constant_value(shader_module const *src, unsigned id) {
1261    auto value = src->get_def(id);
1262    assert(value != src->end());
1263
1264    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
1268        return 1;
1269    }
1270
1271    return value.word(3);
1272}
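// Note: OpConstant is laid out as word(1) = result type, word(2) = result id,
// word(3..) = literal value, so reading only word(3) above is correct just for
// constants that fit in one 32-bit word -- which covers the array lengths and
// similar integral values this helper is used for here.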
1273
1274
1275static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1276    auto insn = src->get_def(type);
1277    assert(insn != src->end());
1278
1279    switch (insn.opcode()) {
1280    case spv::OpTypeBool:
1281        ss << "bool";
1282        break;
1283    case spv::OpTypeInt:
1284        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1285        break;
1286    case spv::OpTypeFloat:
1287        ss << "float" << insn.word(2);
1288        break;
1289    case spv::OpTypeVector:
1290        ss << "vec" << insn.word(3) << " of ";
1291        describe_type_inner(ss, src, insn.word(2));
1292        break;
1293    case spv::OpTypeMatrix:
1294        ss << "mat" << insn.word(3) << " of ";
1295        describe_type_inner(ss, src, insn.word(2));
1296        break;
1297    case spv::OpTypeArray:
1298        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1299        describe_type_inner(ss, src, insn.word(2));
1300        break;
1301    case spv::OpTypePointer:
1302        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1303        describe_type_inner(ss, src, insn.word(3));
1304        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            if (i > 2) {
                ss << ", ";
            }
            describe_type_inner(ss, src, insn.word(i));
        }
        ss << ")";
        break;
    }
1317    case spv::OpTypeSampler:
1318        ss << "sampler";
1319        break;
1320    case spv::OpTypeSampledImage:
1321        ss << "sampler+";
1322        describe_type_inner(ss, src, insn.word(2));
1323        break;
1324    case spv::OpTypeImage:
1325        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1326        break;
1327    default:
1328        ss << "oddtype";
1329        break;
1330    }
1331}
1332
1333
1334static std::string describe_type(shader_module const *src, unsigned type) {
1335    std::ostringstream ss;
1336    describe_type_inner(ss, src, type);
1337    return ss.str();
1338}
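// Illustrative example: for a GLSL input declared as `in vec4 color;`, calling
// describe_type() on the variable's pointer type yields something like
//     "ptr to input vec4 of float32"
// which is the human-readable form used in the mismatch messages below.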
1339
1340
1341static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed) {
1342    /* walk two type trees together, and complain about differences */
1343    auto a_insn = a->get_def(a_type);
1344    auto b_insn = b->get_def(b_type);
1345    assert(a_insn != a->end());
1346    assert(b_insn != b->end());
1347
1348    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1349        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1350        return types_match(a, b, a_type, b_insn.word(2), false);
1351    }
1352
1353    if (a_insn.opcode() != b_insn.opcode()) {
1354        return false;
1355    }
1356
1357    switch (a_insn.opcode()) {
1358    /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
1359    case spv::OpTypeBool:
        return !b_arrayed;
1361    case spv::OpTypeInt:
1362        /* match on width, signedness */
1363        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
1364    case spv::OpTypeFloat:
1365        /* match on width */
1366        return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
1367    case spv::OpTypeVector:
1368    case spv::OpTypeMatrix:
1369        /* match on element type, count. these all have the same layout. we don't get here if
1370         * b_arrayed -- that is handled above. */
1371        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) && a_insn.word(3) == b_insn.word(3);
1372    case spv::OpTypeArray:
1373        /* match on element type, count. these all have the same layout. we don't get here if
1374         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1375         * not a literal within OpTypeArray */
1376        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
1377               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1378    case spv::OpTypeStruct:
1379        /* match on all element types */
1380        {
1381            if (b_arrayed) {
1382                /* for the purposes of matching different levels of arrayness, structs are leaves. */
1383                return false;
1384            }
1385
1386            if (a_insn.len() != b_insn.len()) {
1387                return false; /* structs cannot match if member counts differ */
1388            }
1389
1390            for (unsigned i = 2; i < a_insn.len(); i++) {
1391                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
1392                    return false;
1393                }
1394            }
1395
1396            return true;
1397        }
1398    case spv::OpTypePointer:
1399        /* match on pointee type. storage class is expected to differ */
1400        return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);
1401
1402    default:
        /* remaining types are OpenCL-isms, or may not appear in the interfaces we
1404         * are interested in. Just claim no match.
1405         */
1406        return false;
1407    }
1408}
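// Why b_arrayed exists (illustrative): a vertex shader output `out vec4 v;` is
// consumed by a geometry shader as `in vec4 v[];`, i.e. an OpTypeArray of vec4.
// Calling types_match(vs, gs, vs_type, gs_type, true) peels that extra array
// level off the consumer side before the element types are compared.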
1409
1410static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1411    auto it = map.find(id);
1412    if (it == map.end())
1413        return def;
1414    else
1415        return it->second;
1416}
1417
1418static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1419    auto insn = src->get_def(type);
1420    assert(insn != src->end());
1421
1422    switch (insn.opcode()) {
1423    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the top level for graphics shaders;
1425         * we're never actually passing pointers around. */
1426        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1427    case spv::OpTypeArray:
1428        if (strip_array_level) {
1429            return get_locations_consumed_by_type(src, insn.word(2), false);
1430        } else {
1431            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1432        }
1433    case spv::OpTypeMatrix:
1434        /* num locations is the dimension * element size */
1435        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    default:
        /* everything else consumes a single location.
         * TODO: extend to handle 64-bit scalar types, whose vectors may need
         * multiple locations. */
        return 1;
    }
1443}
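// Worked examples (illustrative): a mat4 is an OpTypeMatrix with 4 columns of
// vec4, so it consumes 4 * 1 = 4 locations; `vec2 a[3]` consumes 3 * 1 = 3
// locations; and with strip_array_level=true the same array counts as its
// element type alone, which is how implicitly-arrayed per-vertex inputs are
// handled.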
1444
1445typedef std::pair<unsigned, unsigned> location_t;
1446typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1447
1448struct interface_var {
1449    uint32_t id;
1450    uint32_t type_id;
1451    uint32_t offset;
1452    /* TODO: collect the name, too? Isn't required to be present. */
1453};
1454
1455static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1456    while (true) {
1457
1458        if (def.opcode() == spv::OpTypePointer) {
1459            def = src->get_def(def.word(3));
1460        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1461            def = src->get_def(def.word(2));
1462            is_array_of_verts = false;
1463        } else if (def.opcode() == spv::OpTypeStruct) {
1464            return def;
1465        } else {
1466            return src->end();
1467        }
1468    }
1469}
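// Illustrative example: an arrayed geometry shader input block such as
//     in Block { vec4 v; } blk[];
// reaches SPIR-V as pointer -> array -> struct. This helper peels the pointer
// and (when is_array_of_verts) one array level, stopping at the struct; any
// other shape yields src->end(), i.e. "not an interface block".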
1470
1471static void collect_interface_block_members(layer_data *my_data, VkDevice dev, shader_module const *src,
1472                                            std::map<location_t, interface_var> &out,
1473                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1474                                            uint32_t id, uint32_t type_id) {
1475    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1476    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
1477    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1478        /* this isn't an interface block. */
1479        return;
1480    }
1481
1482    std::unordered_map<unsigned, unsigned> member_components;
1483
1484    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1485    for (auto insn : *src) {
1486        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1487            unsigned member_index = insn.word(2);
1488
1489            if (insn.word(3) == spv::DecorationComponent) {
1490                unsigned component = insn.word(4);
1491                member_components[member_index] = component;
1492            }
1493        }
1494    }
1495
1496    /* Second pass -- produce the output, from Location decorations */
1497    for (auto insn : *src) {
1498        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1499            unsigned member_index = insn.word(2);
1500            unsigned member_type_id = type.word(2 + member_index);
1501
1502            if (insn.word(3) == spv::DecorationLocation) {
1503                unsigned location = insn.word(4);
1504                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1505                auto component_it = member_components.find(member_index);
1506                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1507
1508                for (unsigned int offset = 0; offset < num_locations; offset++) {
1509                    interface_var v;
1510                    v.id = id;
1511                    /* TODO: member index in interface_var too? */
1512                    v.type_id = member_type_id;
1513                    v.offset = offset;
1514                    out[std::make_pair(location + offset, component)] = v;
1515                }
1516            }
1517        }
1518    }
1519}
1520
1521static void collect_interface_by_location(layer_data *my_data, VkDevice dev, shader_module const *src, spirv_inst_iter entrypoint,
1522                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1523                                          bool is_array_of_verts) {
1524    std::unordered_map<unsigned, unsigned> var_locations;
1525    std::unordered_map<unsigned, unsigned> var_builtins;
1526    std::unordered_map<unsigned, unsigned> var_components;
1527    std::unordered_map<unsigned, unsigned> blocks;
1528
1529    for (auto insn : *src) {
1530
1531        /* We consider two interface models: SSO rendezvous-by-location, and
1532         * builtins. Complain about anything that fits neither model.
1533         */
1534        if (insn.opcode() == spv::OpDecorate) {
1535            if (insn.word(2) == spv::DecorationLocation) {
1536                var_locations[insn.word(1)] = insn.word(3);
1537            }
1538
1539            if (insn.word(2) == spv::DecorationBuiltIn) {
1540                var_builtins[insn.word(1)] = insn.word(3);
1541            }
1542
1543            if (insn.word(2) == spv::DecorationComponent) {
1544                var_components[insn.word(1)] = insn.word(3);
1545            }
1546
1547            if (insn.word(2) == spv::DecorationBlock) {
1548                blocks[insn.word(1)] = 1;
1549            }
1550        }
1551    }
1552
1553    /* TODO: handle grouped decorations */
1554    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */
1556
1557    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1558       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1559       the word to determine which word contains the terminator. */
1560    auto word = 3;
1561    while (entrypoint.word(word) & 0xff000000u) {
1562        ++word;
1563    }
1564    ++word;
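    /* Illustrative example: for the name "main", word 3 holds 'm','a','i','n'
     * (0x6e69616d little-endian; high byte 0x6e is nonzero, so keep scanning)
     * and word 4 is the all-zero terminator/padding word, so the interface
     * <id>s consumed by the loop below start at word 5. */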
1565
1566    for (; word < entrypoint.len(); word++) {
1567        auto insn = src->get_def(entrypoint.word(word));
1568        assert(insn != src->end());
1569        assert(insn.opcode() == spv::OpVariable);
1570
1571        if (insn.word(3) == sinterface) {
1572            unsigned id = insn.word(2);
1573            unsigned type = insn.word(1);
1574
1575            int location = value_or_default(var_locations, id, -1);
1576            int builtin = value_or_default(var_builtins, id, -1);
1577            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1578
1579            /* All variables and interface block members in the Input or Output storage classes
1580             * must be decorated with either a builtin or an explicit location.
1581             *
1582             * TODO: integrate the interface block support here. For now, don't complain --
1583             * a valid SPIRV module will only hit this path for the interface block case, as the
1584             * individual members of the type are decorated, rather than variable declarations.
1585             */
1586
1587            if (location != -1) {
1588                /* A user-defined interface variable, with a location. Where a variable
                 * occupies multiple locations, emit one result for each. */
1590                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts);
1591                for (unsigned int offset = 0; offset < num_locations; offset++) {
1592                    interface_var v;
1593                    v.id = id;
1594                    v.type_id = type;
1595                    v.offset = offset;
1596                    out[std::make_pair(location + offset, component)] = v;
1597                }
1598            } else if (builtin == -1) {
1599                /* An interface block instance */
1600                collect_interface_block_members(my_data, dev, src, out, blocks, is_array_of_verts, id, type);
1601            }
1602        }
1603    }
1604}
1605
1606static void collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev, shader_module const *src,
1607                                                 std::unordered_set<uint32_t> const &accessible_ids,
1608                                                 std::map<descriptor_slot_t, interface_var> &out) {
1609
1610    std::unordered_map<unsigned, unsigned> var_sets;
1611    std::unordered_map<unsigned, unsigned> var_bindings;
1612
1613    for (auto insn : *src) {
1614        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1615         * DecorationDescriptorSet and DecorationBinding.
1616         */
1617        if (insn.opcode() == spv::OpDecorate) {
1618            if (insn.word(2) == spv::DecorationDescriptorSet) {
1619                var_sets[insn.word(1)] = insn.word(3);
1620            }
1621
1622            if (insn.word(2) == spv::DecorationBinding) {
1623                var_bindings[insn.word(1)] = insn.word(3);
1624            }
1625        }
1626    }
1627
1628    for (auto id : accessible_ids) {
1629        auto insn = src->get_def(id);
1630        assert(insn != src->end());
1631
1632        if (insn.opcode() == spv::OpVariable &&
1633            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1634            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1635            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1636
1637            auto existing_it = out.find(std::make_pair(set, binding));
1638            if (existing_it != out.end()) {
1639                /* conflict within spv image */
1640                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1641                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1642                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1643                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1644                        existing_it->first.second);
1645            }
1646
1647            interface_var v;
1648            v.id = insn.word(2);
1649            v.type_id = insn.word(1);
1650            out[std::make_pair(set, binding)] = v;
1651        }
1652    }
1653}
1654
1655static bool validate_interface_between_stages(layer_data *my_data, VkDevice dev, shader_module const *producer,
1656                                              spirv_inst_iter producer_entrypoint, char const *producer_name,
1657                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1658                                              char const *consumer_name, bool consumer_arrayed_input) {
1659    std::map<location_t, interface_var> outputs;
1660    std::map<location_t, interface_var> inputs;
1661
1662    bool pass = true;
1663
1664    collect_interface_by_location(my_data, dev, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
1665    collect_interface_by_location(my_data, dev, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
1666                                  consumer_arrayed_input);
1667
1668    auto a_it = outputs.begin();
1669    auto b_it = inputs.begin();
1670
1671    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1673        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1674        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1675        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1676        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1677
1678        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1679            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
1680                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1681                        "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
1682                        a_first.second, consumer_name)) {
1683                pass = false;
1684            }
1685            a_it++;
1686        } else if (a_at_end || a_first > b_first) {
1687            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1688                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1689                        "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
1690                        producer_name)) {
1691                pass = false;
1692            }
1693            b_it++;
1694        } else {
1695            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
1696                /* OK! */
1697            } else {
1698                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1699                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1700                            a_first.first, a_first.second,
1701                            describe_type(producer, a_it->second.type_id).c_str(),
1702                            describe_type(consumer, b_it->second.type_id).c_str())) {
1703                    pass = false;
1704                }
1705            }
1706            a_it++;
1707            b_it++;
1708        }
1709    }
1710
1711    return pass;
1712}
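// The walk above is a standard merge over two maps sorted by (location, component):
// advancing whichever iterator holds the smaller key visits unconsumed outputs,
// unproduced inputs, and matched pairs exactly once each.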
1713
1714enum FORMAT_TYPE {
1715    FORMAT_TYPE_UNDEFINED,
1716    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1717    FORMAT_TYPE_SINT,
1718    FORMAT_TYPE_UINT,
1719};
1720
1721static unsigned get_format_type(VkFormat fmt) {
1722    switch (fmt) {
1723    case VK_FORMAT_UNDEFINED:
1724        return FORMAT_TYPE_UNDEFINED;
1725    case VK_FORMAT_R8_SINT:
1726    case VK_FORMAT_R8G8_SINT:
1727    case VK_FORMAT_R8G8B8_SINT:
1728    case VK_FORMAT_R8G8B8A8_SINT:
1729    case VK_FORMAT_R16_SINT:
1730    case VK_FORMAT_R16G16_SINT:
1731    case VK_FORMAT_R16G16B16_SINT:
1732    case VK_FORMAT_R16G16B16A16_SINT:
1733    case VK_FORMAT_R32_SINT:
1734    case VK_FORMAT_R32G32_SINT:
1735    case VK_FORMAT_R32G32B32_SINT:
1736    case VK_FORMAT_R32G32B32A32_SINT:
1737    case VK_FORMAT_B8G8R8_SINT:
1738    case VK_FORMAT_B8G8R8A8_SINT:
1739    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1740    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1741        return FORMAT_TYPE_SINT;
1742    case VK_FORMAT_R8_UINT:
1743    case VK_FORMAT_R8G8_UINT:
1744    case VK_FORMAT_R8G8B8_UINT:
1745    case VK_FORMAT_R8G8B8A8_UINT:
1746    case VK_FORMAT_R16_UINT:
1747    case VK_FORMAT_R16G16_UINT:
1748    case VK_FORMAT_R16G16B16_UINT:
1749    case VK_FORMAT_R16G16B16A16_UINT:
1750    case VK_FORMAT_R32_UINT:
1751    case VK_FORMAT_R32G32_UINT:
1752    case VK_FORMAT_R32G32B32_UINT:
1753    case VK_FORMAT_R32G32B32A32_UINT:
1754    case VK_FORMAT_B8G8R8_UINT:
1755    case VK_FORMAT_B8G8R8A8_UINT:
1756    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1757    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1758        return FORMAT_TYPE_UINT;
1759    default:
1760        return FORMAT_TYPE_FLOAT;
1761    }
1762}
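// Illustrative examples: VK_FORMAT_R32G32B32A32_SINT maps to FORMAT_TYPE_SINT,
// VK_FORMAT_R8G8B8A8_UINT to FORMAT_TYPE_UINT, and everything else -- UNORM,
// SNORM, SRGB, true floats, etc. -- falls through to FORMAT_TYPE_FLOAT, since
// all of those are float-typed from the shader's point of view.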
1763
1764/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1765 * for comparison to a VkFormat's characterization above. */
1766static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1767    auto insn = src->get_def(type);
1768    assert(insn != src->end());
1769
1770    switch (insn.opcode()) {
1771    case spv::OpTypeInt:
1772        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1773    case spv::OpTypeFloat:
1774        return FORMAT_TYPE_FLOAT;
1775    case spv::OpTypeVector:
1776        return get_fundamental_type(src, insn.word(2));
1777    case spv::OpTypeMatrix:
1778        return get_fundamental_type(src, insn.word(2));
1779    case spv::OpTypeArray:
1780        return get_fundamental_type(src, insn.word(2));
1781    case spv::OpTypePointer:
1782        return get_fundamental_type(src, insn.word(3));
1783    default:
1784        return FORMAT_TYPE_UNDEFINED;
1785    }
1786}
1787
1788static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1789    uint32_t bit_pos = u_ffs(stage);
1790    return bit_pos - 1;
1791}
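// Illustrative example: VK_SHADER_STAGE_FRAGMENT_BIT is 0x00000010, u_ffs
// returns 5 (1-based index of the first set bit), so the stage id is 4 --
// which also matches the SPIR-V Fragment execution model and indexes
// shader_stage_attribs[] below.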
1792
1793static bool validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi) {
1794    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1795     * each binding should be specified only once.
1796     */
1797    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1798    bool pass = true;
1799
1800    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1801        auto desc = &vi->pVertexBindingDescriptions[i];
1802        auto &binding = bindings[desc->binding];
1803        if (binding) {
1804            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1805                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1806                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1807                pass = false;
1808            }
1809        } else {
1810            binding = desc;
1811        }
1812    }
1813
1814    return pass;
1815}
1816
1817static bool validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi,
1818                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1819    std::map<location_t, interface_var> inputs;
1820    bool pass = true;
1821
1822    collect_interface_by_location(my_data, dev, vs, entrypoint, spv::StorageClassInput, inputs, false);
1823
1824    /* Build index by location */
1825    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1826    if (vi) {
1827        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
1828            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
1829    }
1830
1831    auto it_a = attribs.begin();
1832    auto it_b = inputs.begin();
1833
1834    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1835        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1836        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1837        auto a_first = a_at_end ? 0 : it_a->first;
1838        auto b_first = b_at_end ? 0 : it_b->first.first;
1839        if (!a_at_end && (b_at_end || a_first < b_first)) {
1840            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
1841                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1842                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1843                pass = false;
1844            }
1845            it_a++;
1846        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1847            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1848                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
1849                        b_first)) {
1850                pass = false;
1851            }
1852            it_b++;
1853        } else {
1854            unsigned attrib_type = get_format_type(it_a->second->format);
1855            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1856
1857            /* type checking */
1858            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1859                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1860                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1861                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1862                            string_VkFormat(it_a->second->format), a_first,
1863                            describe_type(vs, it_b->second.type_id).c_str())) {
1864                    pass = false;
1865                }
1866            }
1867
1868            /* OK! */
1869            it_a++;
1870            it_b++;
1871        }
1872    }
1873
1874    return pass;
1875}
1876
1877static bool validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs,
1878                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1879    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
1880    std::map<location_t, interface_var> outputs;
1881    bool pass = true;
1882
1883    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1884
1885    collect_interface_by_location(my_data, dev, fs, entrypoint, spv::StorageClassOutput, outputs, false);
1886
1887    auto it = outputs.begin();
1888    uint32_t attachment = 0;
1889
1890    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
1891     * are currently dense, but the parallel with matching between shader stages is nice.
1892     */
1893
1894    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
1895        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
1896            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1897                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1898                        "FS writes to output location %d with no matching attachment", it->first.first)) {
1899                pass = false;
1900            }
1901            it++;
1902        } else if (it == outputs.end() || it->first.first > attachment) {
1903            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1904                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
1905                pass = false;
1906            }
1907            attachment++;
1908        } else {
1909            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
1910            unsigned att_type = get_format_type(color_formats[attachment]);
1911
1912            /* type checking */
1913            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1914                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1915                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1916                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
1917                            string_VkFormat(color_formats[attachment]),
1918                            describe_type(fs, it->second.type_id).c_str())) {
1919                    pass = false;
1920                }
1921            }
1922
1923            /* OK! */
1924            it++;
1925            attachment++;
1926        }
1927    }
1928
1929    return pass;
1930}
1931
1932/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1933 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1934 * for example.
1935 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1936 *  - NOT the shader input/output interfaces.
1937 *
1938 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1939 * converting parts of this to be generated from the machine-readable spec instead.
1940 */
1941static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1942    std::unordered_set<uint32_t> worklist;
1943    worklist.insert(entrypoint.word(2));
1944
1945    while (!worklist.empty()) {
1946        auto id_iter = worklist.begin();
1947        auto id = *id_iter;
1948        worklist.erase(id_iter);
1949
1950        auto insn = src->get_def(id);
1951        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1953             * across all kinds of things here that we may not care about. */
1954            continue;
1955        }
1956
1957        /* try to add to the output set */
1958        if (!ids.insert(id).second) {
1959            continue; /* if we already saw this id, we don't want to walk it again. */
1960        }
1961
1962        switch (insn.opcode()) {
1963        case spv::OpFunction:
1964            /* scan whole body of the function, enlisting anything interesting */
1965            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1966                switch (insn.opcode()) {
1967                case spv::OpLoad:
1968                case spv::OpAtomicLoad:
1969                case spv::OpAtomicExchange:
1970                case spv::OpAtomicCompareExchange:
1971                case spv::OpAtomicCompareExchangeWeak:
1972                case spv::OpAtomicIIncrement:
1973                case spv::OpAtomicIDecrement:
1974                case spv::OpAtomicIAdd:
1975                case spv::OpAtomicISub:
1976                case spv::OpAtomicSMin:
1977                case spv::OpAtomicUMin:
1978                case spv::OpAtomicSMax:
1979                case spv::OpAtomicUMax:
1980                case spv::OpAtomicAnd:
1981                case spv::OpAtomicOr:
1982                case spv::OpAtomicXor:
1983                    worklist.insert(insn.word(3)); /* ptr */
1984                    break;
1985                case spv::OpStore:
1986                case spv::OpAtomicStore:
1987                    worklist.insert(insn.word(1)); /* ptr */
1988                    break;
1989                case spv::OpAccessChain:
1990                case spv::OpInBoundsAccessChain:
1991                    worklist.insert(insn.word(3)); /* base ptr */
1992                    break;
1993                case spv::OpSampledImage:
1994                case spv::OpImageSampleImplicitLod:
1995                case spv::OpImageSampleExplicitLod:
1996                case spv::OpImageSampleDrefImplicitLod:
1997                case spv::OpImageSampleDrefExplicitLod:
1998                case spv::OpImageSampleProjImplicitLod:
1999                case spv::OpImageSampleProjExplicitLod:
2000                case spv::OpImageSampleProjDrefImplicitLod:
2001                case spv::OpImageSampleProjDrefExplicitLod:
2002                case spv::OpImageFetch:
2003                case spv::OpImageGather:
2004                case spv::OpImageDrefGather:
2005                case spv::OpImageRead:
2006                case spv::OpImage:
2007                case spv::OpImageQueryFormat:
2008                case spv::OpImageQueryOrder:
2009                case spv::OpImageQuerySizeLod:
2010                case spv::OpImageQuerySize:
2011                case spv::OpImageQueryLod:
2012                case spv::OpImageQueryLevels:
2013                case spv::OpImageQuerySamples:
2014                case spv::OpImageSparseSampleImplicitLod:
2015                case spv::OpImageSparseSampleExplicitLod:
2016                case spv::OpImageSparseSampleDrefImplicitLod:
2017                case spv::OpImageSparseSampleDrefExplicitLod:
2018                case spv::OpImageSparseSampleProjImplicitLod:
2019                case spv::OpImageSparseSampleProjExplicitLod:
2020                case spv::OpImageSparseSampleProjDrefImplicitLod:
2021                case spv::OpImageSparseSampleProjDrefExplicitLod:
2022                case spv::OpImageSparseFetch:
2023                case spv::OpImageSparseGather:
2024                case spv::OpImageSparseDrefGather:
2025                case spv::OpImageTexelPointer:
2026                    worklist.insert(insn.word(3)); /* image or sampled image */
2027                    break;
2028                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order from the cases above */
2030                    break;
2031                case spv::OpFunctionCall:
2032                    for (auto i = 3; i < insn.len(); i++) {
2033                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2034                    }
2035                    break;
2036
2037                case spv::OpExtInst:
2038                    for (auto i = 5; i < insn.len(); i++) {
2039                        worklist.insert(insn.word(i)); /* operands to ext inst */
2040                    }
2041                    break;
2042                }
2043            }
2044            break;
2045        }
2046    }
2047}
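// Hedged usage sketch: callers seed the walk with an entrypoint and then only
// validate the ids that entrypoint can actually reach, e.g.
//     std::unordered_set<uint32_t> ids;
//     mark_accessible_ids(module, entrypoint, ids);
//     collect_interface_by_descriptor_slot(my_data, dev, module, ids, slots);
// so descriptors referenced only by dead (uncalled) functions are ignored.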
2048
2049struct shader_stage_attributes {
2050    char const *const name;
2051    bool arrayed_input;
2052};
2053
2054static shader_stage_attributes shader_stage_attribs[] = {
2055    {"vertex shader", false},
2056    {"tessellation control shader", true},
2057    {"tessellation evaluation shader", false},
2058    {"geometry shader", true},
2059    {"fragment shader", false},
2060};
2061
2062static bool validate_push_constant_block_against_pipeline(layer_data *my_data, VkDevice dev,
2063                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
2064                                                          shader_module const *src, spirv_inst_iter type,
2065                                                          VkShaderStageFlagBits stage) {
2066    bool pass = true;
2067
2068    /* strip off ptrs etc */
2069    type = get_struct_type(src, type, false);
2070    assert(type != src->end());
2071
2072    /* validate directly off the offsets. this isn't quite correct for arrays
2073     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2074     * sizes */
2075    for (auto insn : *src) {
2076        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2077
2078            if (insn.word(3) == spv::DecorationOffset) {
2079                unsigned offset = insn.word(4);
2080                auto size = 4; /* bytes; TODO: calculate this based on the type */
2081
2082                bool found_range = false;
2083                for (auto const &range : *pushConstantRanges) {
2084                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2085                        found_range = true;
2086
2087                        if ((range.stageFlags & stage) == 0) {
2088                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2089                                        /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2090                                        "Push constant range covering variable starting at "
2091                                        "offset %u not accessible from stage %s",
2092                                        offset, string_VkShaderStageFlagBits(stage))) {
2093                                pass = false;
2094                            }
2095                        }
2096
2097                        break;
2098                    }
2099                }
2100
2101                if (!found_range) {
2102                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2103                                /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2104                                "Push constant range covering variable starting at "
2105                                "offset %u not declared in layout",
2106                                offset)) {
2107                        pass = false;
2108                    }
2109                }
2110            }
2111        }
2112    }
2113
2114    return pass;
2115}
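// Illustrative example: a push constant member decorated with Offset 16 (and
// the assumed size of 4 bytes) is covered by a VkPushConstantRange with
// offset=0, size=32, but is reported out-of-range for offset=0, size=16, and
// reported inaccessible if the covering range's stageFlags lack this stage.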
2116
2117static bool validate_push_constant_usage(layer_data *my_data, VkDevice dev,
2118                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
                                         std::unordered_set<uint32_t> const &accessible_ids, VkShaderStageFlagBits stage) {
2120    bool pass = true;
2121
2122    for (auto id : accessible_ids) {
2123        auto def_insn = src->get_def(id);
2124        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2125            pass = validate_push_constant_block_against_pipeline(my_data, dev, pushConstantRanges, src,
2126                                                                 src->get_def(def_insn.word(1)), stage) &&
2127                   pass;
2128        }
2129    }
2130
2131    return pass;
2132}
2133
2134// For given pipelineLayout verify that the setLayout at slot.first
2135//  has the requested binding at slot.second
static VkDescriptorSetLayoutBinding const *get_descriptor_binding(layer_data *my_data,
                                                                  vector<VkDescriptorSetLayout> *pipelineLayout,
                                                                  descriptor_slot_t slot) {
2137
2138    if (!pipelineLayout)
2139        return nullptr;
2140
2141    if (slot.first >= pipelineLayout->size())
2142        return nullptr;
2143
2144    auto const layout_node = my_data->descriptorSetLayoutMap[(*pipelineLayout)[slot.first]];
2145
2146    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
2147    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
2148        return nullptr;
2149
2150    assert(bindingIt->second < layout_node->createInfo.bindingCount);
2151    return &layout_node->createInfo.pBindings[bindingIt->second];
2152}
2153
2154// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2155
2156static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2157
2158// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2159//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2160//   to that same cmd buffer by separate thread are not changing state from underneath us
2161// Track the last cmd buffer touched by this thread
2162
2163static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2164    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2165        if (pCB->drawCount[i])
2166            return VK_TRUE;
2167    }
2168    return VK_FALSE;
2169}
2170
2171// Check object status for selected flag state
2172static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags enable_mask, CBStatusFlags status_mask,
2173                                CBStatusFlags status_flag, VkFlags msg_flags, DRAW_STATE_ERROR error_code, const char *fail_msg) {
2174    // If non-zero enable mask is present, check it against status but if enable_mask
2175    //  is 0 then no enable required so we should always just check status
2176    if ((!enable_mask) || (enable_mask & pNode->status)) {
2177        if ((pNode->status & status_mask) != status_flag) {
2178            // TODO : How to pass dispatchable objects as srcObject? Here src obj should be cmd buffer
2179            return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, error_code,
2180                           "DS", "CB object %#" PRIxLEAST64 ": %s", (uint64_t)(pNode->commandBuffer), fail_msg);
2181        }
2182    }
2183    return VK_FALSE;
2184}
2185
2186// Retrieve pipeline node ptr for given pipeline object
2187static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
2188    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
2189        return NULL;
2190    }
2191    return my_data->pipelineMap[pipeline];
2192}
2193
2194// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
2195static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2196    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2197        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2198            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2199                return VK_TRUE;
2200        }
2201    }
2202    return VK_FALSE;
2203}
2204
2205// Validate state stored as flags at time of draw call
2206static VkBool32 validate_draw_state_flags(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
2207    VkBool32 result;
2208    result =
2209        validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_VIEWPORT_SET, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2210                        DRAWSTATE_VIEWPORT_NOT_BOUND, "Dynamic viewport state not set for this command buffer");
2211    result |=
2212        validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_SCISSOR_SET, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2213                        DRAWSTATE_SCISSOR_NOT_BOUND, "Dynamic scissor state not set for this command buffer");
2214    result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_LINE_WIDTH_SET, CBSTATUS_LINE_WIDTH_SET,
2215                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_LINE_WIDTH_NOT_BOUND,
2216                              "Dynamic line width state not set for this command buffer");
2217    result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_DEPTH_BIAS_SET, CBSTATUS_DEPTH_BIAS_SET,
2218                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BIAS_NOT_BOUND,
2219                              "Dynamic depth bias state not set for this command buffer");
2220    result |= validate_status(my_data, pCB, CBSTATUS_COLOR_BLEND_WRITE_ENABLE, CBSTATUS_BLEND_SET, CBSTATUS_BLEND_SET,
2221                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_BLEND_NOT_BOUND,
2222                              "Dynamic blend object state not set for this command buffer");
2223    result |= validate_status(my_data, pCB, CBSTATUS_DEPTH_WRITE_ENABLE, CBSTATUS_DEPTH_BOUNDS_SET, CBSTATUS_DEPTH_BOUNDS_SET,
2224                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND,
2225                              "Dynamic depth bounds state not set for this command buffer");
2226    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_READ_MASK_SET,
2227                              CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
2228                              "Dynamic stencil read mask state not set for this command buffer");
2229    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_WRITE_MASK_SET,
2230                              CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
2231                              "Dynamic stencil write mask state not set for this command buffer");
2232    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_REFERENCE_SET,
2233                              CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
2234                              "Dynamic stencil reference state not set for this command buffer");
2235    if (indexedDraw)
2236        result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_INDEX_BUFFER_BOUND, CBSTATUS_INDEX_BUFFER_BOUND,
2237                                  VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2238                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2239    return result;
2240}
2241
// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
2247static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2248                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2249                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2250                                             const VkAttachmentDescription *pSecondaryAttachments) {
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        return VK_ATTACHMENT_UNUSED == pSecondary[index].attachment;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        return VK_ATTACHMENT_UNUSED == pPrimary[index].attachment;
    } else if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment ||
               VK_ATTACHMENT_UNUSED == pSecondary[index].attachment) {
        // An unused reference is only compatible with another unused reference
        return pPrimary[index].attachment == pSecondary[index].attachment;
    }
    // Both references are in use: their formats and sample counts must match
    return (pPrimaryAttachments[pPrimary[index].attachment].format ==
            pSecondaryAttachments[pSecondary[index].attachment].format) &&
           (pPrimaryAttachments[pPrimary[index].attachment].samples ==
            pSecondaryAttachments[pSecondary[index].attachment].samples);
2266}
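// Worked example (illustrative): with primaryCount=2 and secondaryCount=1,
// index 1 exists only in the primary pass, so its reference must be
// VK_ATTACHMENT_UNUSED to remain compatible; at index 0 both references exist
// and their VkAttachmentDescription format and sample count must agree.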
2267
// For given primary and secondary RenderPass objects, verify that they're compatible
2269static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2270                                            string &errorMsg) {
2271    stringstream errorStr;
2272    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
        errorStr << "invalid primary VkRenderPass (" << primaryRP << ")";
2274        errorMsg = errorStr.str();
2275        return false;
2276    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
        errorStr << "invalid secondary VkRenderPass (" << secondaryRP << ")";
2278        errorMsg = errorStr.str();
2279        return false;
2280    }
2281    // Trivial pass case is exact same RP
2282    if (primaryRP == secondaryRP) {
2283        return true;
2284    }
2285    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2286    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2287    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2288        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2289                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2290        errorMsg = errorStr.str();
2291        return false;
2292    }
    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2295        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2296        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2297        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2298        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2299        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2300            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2301                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2302                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2303                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2304                errorMsg = errorStr.str();
2305                return false;
2306            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2307                                                         primaryColorCount, primaryRPCI->pAttachments,
2308                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2309                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2310                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2311                errorMsg = errorStr.str();
2312                return false;
            }
        }
        // The depth/stencil attachment is a single (possibly null) reference rather than an
        // array, so compare it once per subpass instead of per color attachment index.
        uint32_t primaryDSCount = primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment ? 1 : 0;
        uint32_t secondaryDSCount = secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment ? 1 : 0;
        if ((primaryDSCount || secondaryDSCount) &&
            !attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, primaryDSCount,
                                              primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, secondaryDSCount,
                                              secondaryRPCI->pAttachments)) {
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }
2323        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2324        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2325        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2326        for (uint32_t i = 0; i < inputMax; ++i) {
2327            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount,
2328                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2329                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2330                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2331                errorMsg = errorStr.str();
2332                return false;
2333            }
2334        }
2335    }
2336    return true;
2337}
2338
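// Note (illustrative, stated per the spec's "Render Pass Compatibility" rules as applied by
// attachment_references_compatible above): two attachment references are compatible when both
// are unused, or when the attachments they reference match in format and sample count;
// load/store ops and layouts do not participate. For example, two otherwise-identical render
// passes whose attachments differ only as below would still be treated as compatible:
//
//     VkAttachmentDescription a = {};
//     a.format = VK_FORMAT_B8G8R8A8_UNORM;            // same format in both passes
//     a.samples = VK_SAMPLE_COUNT_1_BIT;              // same sample count in both passes
//     VkAttachmentDescription b = a;
//     b.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;     // differs, but still compatible
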
// For the given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
                                            const uint32_t layoutIndex, string &errorMsg) {
    stringstream errorStr;
    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
        errorStr << "invalid VkPipelineLayout (" << layout << ")";
        errorMsg = errorStr.str();
        return false;
    }
    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
                 << ", but you're attempting to bind set to index " << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    // Get the specific setLayout from PipelineLayout that overlaps this set
    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
        return true;
    }
    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
                 << " descriptors.";
        errorMsg = errorStr.str();
        return false; // trivial fail case
    }
    // Now need to check set against corresponding pipelineLayout to verify compatibility
    for (size_t i = 0; i < descriptorCount; ++i) {
        // Need to verify that layouts are identically defined
        //  TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
        //    do we also need to check immutable samplers?
        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
                     << "' but corresponding descriptor from pipelineLayout is type '"
                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
            errorMsg = errorStr.str();
            return false;
        }
        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
                     << " but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
            errorMsg = errorStr.str();
            return false;
        }
    }
    return true;
}

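// Illustrative sketch (hypothetical objects): the mismatch above is reported when a set's
// layout and the pipeline layout's setLayout at the same index are not identically defined,
// e.g. binding 0 created with different stage visibility on each side:
//
//     VkDescriptorSetLayoutBinding setSide  = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1,
//                                              VK_SHADER_STAGE_VERTEX_BIT, nullptr};
//     VkDescriptorSetLayoutBinding pipeSide = setSide;
//     pipeSide.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; // same type, different stageFlags
//     // -> "stageFlags 0 for descriptorSet being bound is ..." from the check above
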
// Validate that data for each specialization entry is fully contained within the buffer.
static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
    VkBool32 pass = VK_TRUE;

    VkSpecializationInfo const *spec = info->pSpecializationInfo;

    if (spec) {
        for (auto i = 0u; i < spec->mapEntryCount; i++) {
            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
                            "Specialization entry %u (for constant id %u) references memory outside provided "
                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
                            " bytes provided)",
                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {

                    pass = VK_FALSE;
                }
            }
        }
    }

    return pass;
}

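// Illustrative sketch (not compiled here): a specialization entry that trips the check
// above, assuming a 4-byte data blob:
//
//     uint32_t value = 0;
//     VkSpecializationMapEntry entry = {/*constantID*/ 0, /*offset*/ 2, /*size*/ sizeof(value)};
//     VkSpecializationInfo si = {/*mapEntryCount*/ 1, &entry, /*dataSize*/ sizeof(value), &value};
//     // offset (2) + size (4) = 6 > dataSize (4) -> SHADER_CHECKER_BAD_SPECIALIZATION
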
static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    /* Strip off any array or ptrs. Where we remove array levels, adjust the
     * descriptor count for each dimension. */
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        } else {
            type = module->get_def(type.word(3));
        }
    }

    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        /* Invalid */
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;

    case spv::OpTypeSampledImage:
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        /* Many descriptor types backing image types-- depends on dimension
         * and whether the image will be used with a sampler. SPIRV for
         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
         * runtime is unacceptable.
         */
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    /* We shouldn't really see any other junk types -- but if we do, they're
     * a mismatch.
     */
    default:
        return false; /* Mismatch */
    }
}

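/* For reference (illustrative): typical GLSL declarations and the descriptor type this
 * check expects each to be bound with:
 *     layout(set=0, binding=0) uniform UBO {...};           -> UNIFORM_BUFFER(_DYNAMIC)
 *     layout(set=0, binding=1) buffer  SSBO {...};          -> STORAGE_BUFFER(_DYNAMIC)
 *     layout(set=0, binding=2) uniform sampler2D tex;       -> COMBINED_IMAGE_SAMPLER
 *     layout(set=0, binding=3, rgba8) uniform image2D img;  -> STORAGE_IMAGE
 *     layout(set=0, binding=4) uniform samplerBuffer tb;    -> UNIFORM_TEXEL_BUFFER
 *     layout(input_attachment_index=0, set=0, binding=5) uniform subpassInput ia;
 *                                                           -> INPUT_ATTACHMENT
 * Arrays multiply descriptor_count, e.g. `uniform sampler2D texs[4];` requires
 * descriptorCount >= 4 at that binding.
 */
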
static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    /* dev */ 0, __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
                    "enabled on the device",
                    feature_name)) {
            return VK_FALSE;
        }
    }

    return VK_TRUE;
}

static VkBool32 validate_shader_capabilities(layer_data *my_data, VkDevice dev, shader_module const *src) {
    VkBool32 pass = VK_TRUE;

    auto enabledFeatures = &my_data->physDevProperties.features;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /* dev */0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, which is not supported in Vulkan.",
                            insn.word(1)))
                    pass = VK_FALSE;
                break;
            }
        }
    }

    return pass;
}

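// Illustrative sketch (assumes a VkPhysicalDevice `gpu` owned by the app): features that a
// shader's capabilities depend on must be enabled at device creation, e.g. geometryShader:
//
//     VkPhysicalDeviceFeatures supported;
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.geometryShader = supported.geometryShader; // request only what exists
//     VkDeviceCreateInfo ci = {VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};
//     ci.pEnabledFeatures = &enabled;
//     // ... queue create info elided ...
//     vkCreateDevice(gpu, &ci, nullptr, &device);       // `device` is hypothetical here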

// Validate the shaders used by the given pipeline.
//  As a side effect this function also records the sets that are actually used by the pipeline
static VkBool32 validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE *pPipeline) {
    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
    /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
     * before trying to do anything more: */
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    RENDER_PASS_NODE const *rp = 0;
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    VkBool32 pass = VK_TRUE;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
        if (pStage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {

            if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
                                  VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_UNKNOWN_STAGE, "SC", "Unknown shader stage %d", pStage->stage)) {
                    pass = VK_FALSE;
                }
            } else {
                pass = validate_specialization_offsets(my_data, pStage) && pass;

                auto stage_id = get_shader_stage_id(pStage->stage);
                auto module = my_data->shaderModuleMap[pStage->module].get();
                shaders[stage_id] = module;

                /* find the entrypoint */
                entrypoints[stage_id] = find_entrypoint(module, pStage->pName, pStage->stage);
                if (entrypoints[stage_id] == module->end()) {
                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
                                "No entrypoint found named `%s` for stage %s", pStage->pName,
                                string_VkShaderStageFlagBits(pStage->stage))) {
                        pass = VK_FALSE;
                    }
                }

                /* validate shader capabilities against enabled device features */
                pass = validate_shader_capabilities(my_data, dev, module) && pass;

                /* mark accessible ids */
                std::unordered_set<uint32_t> accessible_ids;
                mark_accessible_ids(module, entrypoints[stage_id], accessible_ids);

                /* validate descriptor set layout against what the entrypoint actually uses */
                std::map<descriptor_slot_t, interface_var> descriptor_uses;
                collect_interface_by_descriptor_slot(my_data, dev, module, accessible_ids, descriptor_uses);

                auto layouts = pCreateInfo->layout != VK_NULL_HANDLE
                                   ? &(my_data->pipelineLayoutMap[pCreateInfo->layout].descriptorSetLayouts)
                                   : nullptr;

                for (auto use : descriptor_uses) {
                    // As a side-effect of this function, capture which sets are used by the pipeline
                    pPipeline->active_sets.insert(use.first.first);

                    /* find the matching binding */
                    auto binding = get_descriptor_binding(my_data, layouts, use.first);
                    unsigned required_descriptor_count;

                    if (!binding) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                                    "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                                    use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                            pass = VK_FALSE;
                        }
                    } else if (~binding->stageFlags & pStage->stage) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                    "Shader uses descriptor slot %u.%u (used "
                                    "as type `%s`) but descriptor not "
                                    "accessible from stage %s",
                                    use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    string_VkShaderStageFlagBits(pStage->stage))) {
                            pass = VK_FALSE;
                        }
                    } else if (!descriptor_type_match(my_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                                    "Type mismatch on descriptor slot "
                                    "%u.%u (used as type `%s`) but "
                                    "descriptor of type %s",
                                    use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    string_VkDescriptorType(binding->descriptorType))) {
                            pass = VK_FALSE;
                        }
                    } else if (binding->descriptorCount < required_descriptor_count) {
                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                                    "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                                    required_descriptor_count, use.first.first, use.first.second,
                                    describe_type(module, use.second.type_id).c_str(),
                                    binding->descriptorCount)) {
                            pass = VK_FALSE;
                        }
                    }
                }

                /* validate push constant usage */
                pass =
                    validate_push_constant_usage(my_data, dev, &my_data->pipelineLayoutMap[pCreateInfo->layout].pushConstantRanges,
                                                 module, accessible_ids, pStage->stage) &&
                    pass;
            }
        }
    }

    if (pCreateInfo->renderPass != VK_NULL_HANDLE)
        rp = my_data->renderPassMap[pCreateInfo->renderPass];

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass = validate_vi_consistency(my_data, dev, vi) && pass;
    }

    if (shaders[vertex_stage]) {
        pass = validate_vi_against_vs_inputs(my_data, dev, vi, shaders[vertex_stage], entrypoints[vertex_stage]) && pass;
    }

    /* TODO: enforce rules about present combinations of shaders */
    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass = validate_interface_between_stages(my_data, dev, shaders[producer], entrypoints[producer],
                                                     shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
                                                     shader_stage_attribs[consumer].name,
                                                     shader_stage_attribs[consumer].arrayed_input) &&
                   pass;

            producer = consumer;
        }
    }

    if (shaders[fragment_stage] && rp) {
        pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage], entrypoints[fragment_stage], rp,
                                                       pCreateInfo->subpass) &&
               pass;
    }

    return pass;
}

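// Example of the kind of mismatch caught above (illustrative): a fragment shader declaring
//     layout(set=1, binding=0) uniform sampler2D tex;
// while the VkPipelineLayout only provides set 0 is reported as
// SHADER_CHECKER_MISSING_DESCRIPTOR for descriptor slot 1.0; declaring it with the right
// set but binding it as VK_DESCRIPTOR_TYPE_STORAGE_IMAGE instead is reported as
// SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH.
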
// Return Set node ptr for specified set or else NULL
static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
    if (my_data->setMap.find(set) == my_data->setMap.end()) {
        return NULL;
    }
    return my_data->setMap[set];
}
// For the given command buffer, verify that for each set in activeSetNodes
//  any dynamic descriptor in that set has a valid dynamic offset bound.
//  To be valid, the dynamic offset combined with the offset and range from its
//  descriptor update must not overflow the size of its buffer being updated
static VkBool32 validate_dynamic_offsets(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const vector<SET_NODE *> activeSetNodes) {
    VkBool32 result = VK_FALSE;

    VkWriteDescriptorSet *pWDS = NULL;
    uint32_t dynOffsetIndex = 0;
    VkDeviceSize bufferSize = 0;
    for (auto set_node : activeSetNodes) {
        for (uint32_t i = 0; i < set_node->descriptorCount; ++i) {
            // TODO: Add validation for descriptors dynamically skipped in shader
            if (set_node->ppDescriptors[i] != NULL) {
                switch (set_node->ppDescriptors[i]->sType) {
                case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
                    pWDS = (VkWriteDescriptorSet *)set_node->ppDescriptors[i];
                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                            bufferSize = my_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
                            uint32_t dynOffset = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
                            if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
                                if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
                                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                      DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
                                                      "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of "
                                                      "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32
                                                      " combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
                                                      ") which has a size of %#" PRIxLEAST64 ".",
                                                      reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                                      pWDS->pBufferInfo[j].offset,
                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                                }
                            } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
                                result |= log_msg(
                                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW,
                                    "DS",
                                    "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". "
                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
                                    " from its update, this oversteps its buffer "
                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                            }
                            dynOffsetIndex++;
                        }
                        // Advance i to the last descriptor written by this update (the ++i in the
                        // enclosing loop then moves one index past it).
                        i += pWDS->descriptorCount - 1;
                    }
                    break;
                default: // Currently only shadowing Write update nodes so shouldn't get here
                    assert(0);
                    continue;
                }
            }
        }
    }
    return result;
}

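// Worked example (illustrative): for a 256-byte buffer whose descriptor update used
// offset=64 and range=128, any dynamic offset greater than 64 trips the check above,
// since 64 (offset) + 128 (range) + dynOffset > 256 whenever dynOffset > 64. With
// range == VK_WHOLE_SIZE, only offset + dynOffset is bounded by the buffer size.
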
// Validate overall state at the time of a draw call
static VkBool32 validate_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
    // First check flag states
    VkBool32 result = validate_draw_state_flags(my_data, pCB, indexedDraw);
    PIPELINE_NODE *pPipe = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
    // Now complete other state checks
    // TODO : Currently only performing next check if *something* was bound (non-zero last bound)
    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound
    //  We should have that check separately and then gate this check based on that check
    if (pPipe) {
        auto const &state = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS];
        if (state.pipelineLayout) {
            string errorString;
            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
            vector<SET_NODE *> activeSetNodes;
            for (auto setIndex : pPipe->active_sets) {
                // If valid set is not bound throw an error
                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
                                      (uint64_t)pPipe->pipeline, setIndex);
                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
                    result |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                        "VkDescriptorSet (%#" PRIxLEAST64
                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
                } else { // Valid set is bound and layout compatible, validate that it's updated and verify any dynamic offsets
                    // Pull the set node
                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
                    // Save vector of all active sets to verify dynamicOffsets below
                    activeSetNodes.push_back(pSet);
                    // Make sure set has been updated
                    if (!pSet->pUpdateStructs) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
                                                              "this will result in undefined behavior.",
                                          (uint64_t)pSet->set);
                    }
                }
            }
            // For each dynamic descriptor, make sure dynamic offset doesn't overstep buffer
            if (!state.dynamicOffsets.empty())
                result |= validate_dynamic_offsets(my_data, pCB, activeSetNodes);
        }
        // Verify Vtx binding
        if (pPipe->vertexBindingDescriptions.size() > 0) {
            for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
                if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                      "The Pipeline State Object (%#" PRIxLEAST64
                                      ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
                                      " should be set via vkCmdBindVertexBuffers.",
                                      (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline, i);
                }
            }
        } else {
            if (!pCB->currentDrawData.buffers.empty()) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                  0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                  "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
                                  ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
                                  (uint64_t)pCB->commandBuffer, (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
            }
        }
        // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
        // Skip check if rasterization is disabled or there is no viewport.
        if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
             !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
            pPipe->graphicsPipelineCI.pViewportState) {
            VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
            VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
            if (dynViewport) {
                if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                      "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
                                      ", but PSO viewportCount is %u. These counts must match.",
                                      pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (dynScissor) {
                if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                      "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
                                      ", but PSO scissorCount is %u. These counts must match.",
                                      pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return result;
}

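// Example (illustrative): if the bound pipeline was created with viewportCount == 2 and
// VK_DYNAMIC_STATE_VIEWPORT, the command buffer must have recorded
//     vkCmdSetViewport(cb, /*firstViewport*/ 0, /*viewportCount*/ 2, viewports);
// before drawing; having set only one viewport triggers the
// DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH message above.
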
// Verify that create state for a pipeline is valid
static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
                                          int pipelineIndex) {
    VkBool32 skipCall = VK_FALSE;

    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_NODE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->physDevProperties.features.independentBlend) {
            VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
            for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
                    (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
                    (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
                    (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
                    (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
                    (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
                    (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
                    (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
                                                                   "enabled, all elements of pAttachments must be identical");
                }
            }
        }
        if (!my_data->physDevProperties.features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
        }
        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
        }
    }

    // Ensure the subpass index is valid. If not, then validate_pipeline_shaders
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
    if (rp_data != my_data->renderPassMap.end() &&
        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                                           "is out of range for this renderpass (0..%u)",
                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
    }

    if (!validate_pipeline_shaders(my_data, device, pPipeline)) {
        skipCall = VK_TRUE;
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skipCall |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
    }
    // Either both or neither TC/TE shaders should be defined
    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
    }
    // Compute shaders should be specified independent of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                           "topology for tessellation pipelines");
    }
    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology used with patchControlPoints value %u."
                                                                               " patchControlPoints should be >0 and <=32.",
                                pPipeline->tessStateCI.patchControlPoints);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                           "and scissors are dynamic, the PSO must include "
                                                                           "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                pPipeline->vpStateCI.viewportCount, pPipeline->vpStateCI.scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                        "vkCmdSetViewport().",
                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                        "vkCmdSetScissor().",
                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return skipCall;
}

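// Illustrative sketch: a valid derivative chain inside one vkCreateGraphicsPipelines call,
// satisfying the base-pipeline rules checked above. The base must precede the derivative
// in the array and must allow derivatives:
//
//     VkGraphicsPipelineCreateInfo infos[2] = {baseCI, derivedCI}; // hypothetical CIs
//     infos[0].flags |= VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
//     infos[1].flags |= VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     infos[1].basePipelineIndex = 0;               // earlier element in the array
//     infos[1].basePipelineHandle = VK_NULL_HANDLE; // exactly one of index/handle set
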
// Init the pipeline mapping info based on pipeline create info LL tree
//  Threading note : Calls to this function should be wrapped in a mutex
// TODO : this should really just be in the constructor for PIPELINE_NODE
static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;

    // First init create info
    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));

    size_t bufferSize = 0;
    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];

        switch (pPSSCI->stage) {
        case VK_SHADER_STAGE_VERTEX_BIT:
            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
            break;
        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
            break;
        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
            break;
        case VK_SHADER_STAGE_GEOMETRY_BIT:
            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
            break;
        case VK_SHADER_STAGE_FRAGMENT_BIT:
            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
            break;
        case VK_SHADER_STAGE_COMPUTE_BIT:
            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
            break;
        default:
            // TODO : Flag error
            break;
        }
    }
    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
    if (pCreateInfo->stageCount != 0) {
        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
    }
    if (pCreateInfo->pVertexInputState != NULL) {
        pPipeline->vertexInputCI = *pCreateInfo->pVertexInputState;
        // Copy embedded ptrs
        pVICI = pCreateInfo->pVertexInputState;
        if (pVICI->vertexBindingDescriptionCount) {
            pPipeline->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
                pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
        }
        if (pVICI->vertexAttributeDescriptionCount) {
            pPipeline->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>(
                pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
        }
        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
    }
    if (pCreateInfo->pInputAssemblyState != NULL) {
        pPipeline->iaStateCI = *pCreateInfo->pInputAssemblyState;
        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
    }
    if (pCreateInfo->pTessellationState != NULL) {
        pPipeline->tessStateCI = *pCreateInfo->pTessellationState;
        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
    }
    if (pCreateInfo->pViewportState != NULL) {
        pPipeline->vpStateCI = *pCreateInfo->pViewportState;
        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
    }
    if (pCreateInfo->pRasterizationState != NULL) {
        pPipeline->rsStateCI = *pCreateInfo->pRasterizationState;
        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
    }
    if (pCreateInfo->pMultisampleState != NULL) {
        pPipeline->msStateCI = *pCreateInfo->pMultisampleState;
        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
    }
    if (pCreateInfo->pDepthStencilState != NULL) {
        pPipeline->dsStateCI = *pCreateInfo->pDepthStencilState;
        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
    }
    if (pCreateInfo->pColorBlendState != NULL) {
3271        pPipeline->cbStateCI = *pCreateInfo->pColorBlendState;
3272        // Copy embedded ptrs
3273        pCBCI = pCreateInfo->pColorBlendState;
3274        if (pCBCI->attachmentCount) {
3275            pPipeline->attachments = std::vector<VkPipelineColorBlendAttachmentState>(
3276                pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
3277        }
3278        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
3279    }
3280    if (pCreateInfo->pDynamicState != NULL) {
3281        pPipeline->dynStateCI = *pCreateInfo->pDynamicState;
3282        if (pPipeline->dynStateCI.dynamicStateCount) {
3283            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
3284            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
3285            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
3286        }
3287        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
3288    }
3289    return pPipeline;
3290}
3291
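// Ownership note for the shadow copy built above: pStages and pDynamicStates are deep-copied with
// new[] and are released with matching delete[] in deletePipelines() below, while the vertex
// binding/attribute and color-blend attachment arrays live in std::vector members that clean up
// automatically. The remaining sub-state structs are copied by value into PIPELINE_NODE members and
// graphicsPipelineCI's pointers are re-aimed at them, so the shadowed create info stays valid even
// after the application frees its original structures.
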
3292// Free the Pipeline nodes
3293static void deletePipelines(layer_data *my_data) {
3294    if (my_data->pipelineMap.empty())
3295        return;
3296    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
3297        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
3298            delete[](*ii).second->graphicsPipelineCI.pStages;
3299        }
3300        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
3301            delete[](*ii).second->dynStateCI.pDynamicStates;
3302        }
3303        delete (*ii).second;
3304    }
3305    my_data->pipelineMap.clear();
3306}
3307
3308// For given pipeline, return its number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if no multisample state is present
3309static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
3310    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
3311    if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
3312        return pPipe->msStateCI.rasterizationSamples;
3313    }
3314    return VK_SAMPLE_COUNT_1_BIT;
3315}
3316
3317// Validate state related to the PSO
3318static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
3319                                      const VkPipeline pipeline) {
3320    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
3321        // Verify that any MSAA request in PSO matches sample# in bound FB
3322        // Skip the check if rasterization is disabled.
3323        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
3324        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3325            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3326            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
3327            if (pCB->activeRenderPass) {
3328                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
3329                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
3330                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3331                uint32_t i;
3332
3333                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3334                    VkSampleCountFlagBits samples;
3335
3336                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3337                        continue;
3338
3339                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3340                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3341                        subpassNumSamples = samples;
3342                    } else if (subpassNumSamples != samples) {
3343                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3344                        break;
3345                    }
3346                }
3347                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3348                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3349                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3350                        subpassNumSamples = samples;
3351                    else if (subpassNumSamples != samples)
3352                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3353                }
3354
3355                if (psoNumSamples != subpassNumSamples) {
3356                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3357                                   (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3358                                   "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
3359                                   ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
3360                                   (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3361                }
3362            } else {
3363                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3364                //   Verify and flag error as appropriate
3365            }
3366        }
3367        // TODO : Add more checks here
3368    } else {
3369        // TODO : Validate non-gfx pipeline updates
3370    }
3371    return VK_FALSE;
3372}
3373
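// Illustrative sketch (application side, not part of this layer): the sample-count check above is
// satisfied by keeping the pipeline's multisample state in agreement with every attachment of the
// active subpass:
//
//     VkAttachmentDescription colorAtt = {};
//     colorAtt.samples = VK_SAMPLE_COUNT_4_BIT;             // render-pass side
//     VkPipelineMultisampleStateCreateInfo msCI = {};
//     msCI.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
//     msCI.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT;    // pipeline side must match
//
// A subpass that mixes attachments with different sample counts collapses to the
// (VkSampleCountFlagBits)-1 sentinel above and can never compare equal to a real pipeline count.
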
3374// The block of code below is dedicated to managing/tracking descriptor sets (DSs)
3375
3376// Return Pool node ptr for specified pool or else NULL
3377static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3378    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3379        return NULL;
3380    }
3381    return my_data->descriptorPoolMap[pool];
3382}
3383
3384static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
3385    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
3386        return NULL;
3387    }
3388    return my_data->descriptorSetLayoutMap[layout];
3389}
3390
3391// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
3392static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3393    switch (pUpdateStruct->sType) {
3394    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3395    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3396        return VK_FALSE;
3397    default:
3398        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3399                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3400                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3401                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3402    }
3403}
3404
3405// Return the descriptorCount for the given update struct
3406// Returns 0 for an unrecognized update struct type; that error is flagged separately by validUpdateStruct()
3407static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3408    switch (pUpdateStruct->sType) {
3409    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3410        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3411    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3412        // TODO : Need to understand this case better and make sure code is correct
3413        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3414    default:
3415        return 0;
3416    }
3418}
3419
3420// For given Layout Node and binding, return index where that binding begins
3421static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
3422    uint32_t offsetIndex = 0;
3423    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
3424        if (pLayout->createInfo.pBindings[i].binding == binding)
3425            break;
3426        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
3427    }
3428    return offsetIndex;
3429}
3430
3431// For given layout node and binding, return last index that is updated
3432static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
3433    uint32_t offsetIndex = 0;
3434    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
3435        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
3436        if (pLayout->createInfo.pBindings[i].binding == binding)
3437            break;
3438    }
3439    return offsetIndex - 1;
3440}
3441
3442// For given layout and update, return the first overall index of the layout that is updated
3443static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3444                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3445    return getBindingStartIndex(pLayout, binding) + arrayIndex;
3446}
3447
3448// For given layout and update, return the last overall index of the layout that is updated
3449static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3450                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3451    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3452    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
3453}
3454
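// Worked example for the index helpers above (hypothetical layout, for illustration only):
// a layout with binding 0 -> descriptorCount 3 and binding 1 -> descriptorCount 2 flattens to the
// index space [0,1,2 | 3,4], so getBindingStartIndex(layout, 1) == 3 and
// getBindingEndIndex(layout, 1) == 4. A write with dstBinding = 1, dstArrayElement = 0 and
// descriptorCount = 2 then spans getUpdateStartIndex(...) == 3 through getUpdateEndIndex(...) == 4,
// exactly filling the binding, so the bounds check in dsUpdate() passes.
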
3455// Verify that the descriptor type in the update struct matches what's expected by the layout
3456static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
3457                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3458    // First get actual type of update
3459    VkBool32 skipCall = VK_FALSE;
3460    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM; // initialized so the default case below cannot leave it unset
3461    uint32_t i = 0;
3462    switch (pUpdateStruct->sType) {
3463    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3464        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3465        break;
3466    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3467        /* no need to validate */
3468        return VK_FALSE;
3470    default:
3471        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3472                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3473                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3474                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3475    }
3476    if (VK_FALSE == skipCall) {
3477        // Set first stageFlags as reference and verify that all other updates match it
3478        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
3479        for (i = startIndex; i <= endIndex; i++) {
3480            if (pLayout->descriptorTypes[i] != actualType) {
3481                skipCall |= log_msg(
3482                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3483                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3484                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3485                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
3486            }
3487            if (pLayout->stageFlags[i] != refStageFlags) {
3488                skipCall |= log_msg(
3489                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3490                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
3491                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
3492                    refStageFlags, pLayout->stageFlags[i]);
3493            }
3494        }
3495    }
3496    return skipCall;
3497}
3498
3499// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3500//   struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
3501// NOTE : Calls to this function should be wrapped in mutex
3502static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3503    VkBool32 skipCall = VK_FALSE;
3504    VkWriteDescriptorSet *pWDS = NULL;
3505    VkCopyDescriptorSet *pCDS = NULL;
3506    switch (pUpdate->sType) {
3507    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3508        pWDS = new VkWriteDescriptorSet;
3509        *pNewNode = (GENERIC_HEADER *)pWDS;
3510        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3511
3512        switch (pWDS->descriptorType) {
3513        case VK_DESCRIPTOR_TYPE_SAMPLER:
3514        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3515        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3516        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3517            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3518            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3519            pWDS->pImageInfo = info;
3520        } break;
3521        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3522        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3523            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3524            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3525            pWDS->pTexelBufferView = info;
3526        } break;
3527        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3528        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3529        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3530        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3531            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3532            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3533            pWDS->pBufferInfo = info;
3534        } break;
3535        default:
3536            // Unrecognized descriptor type: this function returns VkBool32 (not VkResult), so signal error/skip
3537            return VK_TRUE;
3538        }
3539        break;
3540    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3541        pCDS = new VkCopyDescriptorSet;
3542        *pNewNode = (GENERIC_HEADER *)pCDS;
3543        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3544        break;
3545    default:
3546        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3547                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3548                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3549                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3550            return VK_TRUE;
3551    }
3552    // Make sure that pNext for the end of the shadow copy is NULL; the default case above can fall
3553    // through with *pNewNode still unset, so guard the dereference
3554    if (*pNewNode)
3555        (*pNewNode)->pNext = NULL;
3554    return skipCall;
3555}
3556
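// Why shadowUpdateNode() must deep-copy the info arrays (application-side view, illustrative;
// handles are placeholders): the arrays referenced by a VkWriteDescriptorSet only need to outlive
// the call itself, so stack storage like this is perfectly legal:
//
//     VkDescriptorImageInfo imgInfo = {sampler, imageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
//     VkWriteDescriptorSet wds = {};
//     wds.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     wds.dstSet = set;
//     wds.dstBinding = 0;
//     wds.descriptorCount = 1;
//     wds.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
//     wds.pImageInfo = &imgInfo;   // dead as soon as the call returns
//     vkUpdateDescriptorSets(device, 1, &wds, 0, NULL);
//
// The shadow node therefore owns its own pImageInfo/pBufferInfo/pTexelBufferView storage, which
// freeShadowUpdateTree() later releases with delete[].
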
3557// Verify that given sampler is valid
3558static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
3559    VkBool32 skipCall = VK_FALSE;
3560    auto sampIt = my_data->sampleMap.find(*pSampler);
3561    if (sampIt == my_data->sampleMap.end()) {
3562        if (!immutable) {
3563            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3564                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3565                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3566                                (uint64_t)*pSampler);
3567        } else { // immutable
3568            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3569                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3570                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3571                                "sampler %#" PRIxLEAST64,
3572                                (uint64_t)*pSampler);
3573        }
3574    } else {
3575        // TODO : Any further checks we want to do on the sampler?
3576    }
3577    return skipCall;
3578}
3579
3580// find layout(s) on the cmd buf level
3581bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3582    ImageSubresourcePair imgpair = {image, true, range};
3583    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3584    if (imgsubIt == pCB->imageLayoutMap.end()) {
3585        imgpair = {image, false, VkImageSubresource()};
3586        imgsubIt = pCB->imageLayoutMap.find(imgpair);
3587        if (imgsubIt == pCB->imageLayoutMap.end())
3588            return false;
3589    }
3590    node = imgsubIt->second;
3591    return true;
3592}
3593
3594// find layout(s) on the global level
3595bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3596    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3597    if (imgsubIt == my_data->imageLayoutMap.end()) {
3598        imgpair = {imgpair.image, false, VkImageSubresource()};
3599        imgsubIt = my_data->imageLayoutMap.find(imgpair);
3600        if (imgsubIt == my_data->imageLayoutMap.end())
3601            return false;
3602    }
3603    layout = imgsubIt->second.layout;
3604    return true;
3605}
3606
3607bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3608    ImageSubresourcePair imgpair = {image, true, range};
3609    return FindLayout(my_data, imgpair, layout);
3610}
3611
3612bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3613    auto sub_data = my_data->imageSubresourceMap.find(image);
3614    if (sub_data == my_data->imageSubresourceMap.end())
3615        return false;
3616    auto imgIt = my_data->imageMap.find(image);
3617    if (imgIt == my_data->imageMap.end())
3618        return false;
3619    bool ignoreGlobal = false;
3620    // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3621    // potential errors in this case.
3622    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3623        ignoreGlobal = true;
3624    }
3625    for (auto imgsubpair : sub_data->second) {
3626        if (ignoreGlobal && !imgsubpair.hasSubresource)
3627            continue;
3628        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3629        if (img_data != my_data->imageLayoutMap.end()) {
3630            layouts.push_back(img_data->second.layout);
3631        }
3632    }
3633    return true;
3634}
3635
3636// Set the layout on the global level
3637void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3638    VkImage &image = imgpair.image;
3639    // TODO (mlentine): Maybe set format if new? Not used atm.
3640    my_data->imageLayoutMap[imgpair].layout = layout;
3641    // TODO (mlentine): Maybe make vector a set?
3642    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3643    if (subresource == my_data->imageSubresourceMap[image].end()) {
3644        my_data->imageSubresourceMap[image].push_back(imgpair);
3645    }
3646}
3647
3648void SetLayout(layer_data *my_data, VkImage image, const VkImageLayout &layout) {
3649    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3650    SetLayout(my_data, imgpair, layout);
3651}
3652
3653void SetLayout(layer_data *my_data, VkImage image, VkImageSubresource range, const VkImageLayout &layout) {
3654    ImageSubresourcePair imgpair = {image, true, range};
3655    SetLayout(my_data, imgpair, layout);
3656}
3657
3658// Set the layout on the cmdbuf level
3659void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3660    pCB->imageLayoutMap[imgpair] = node;
3661    // TODO (mlentine): Maybe make vector a set?
3662    auto subresource = std::find(pCB->imageSubresourceMap[image].begin(), pCB->imageSubresourceMap[image].end(), imgpair);
3663    if (subresource == pCB->imageSubresourceMap[image].end()) {
3664        pCB->imageSubresourceMap[image].push_back(imgpair);
3665    }
3666}
3667
3668void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3669    // TODO (mlentine): Maybe make vector a set?
3670    if (std::find(pCB->imageSubresourceMap[image].begin(), pCB->imageSubresourceMap[image].end(), imgpair) !=
3671        pCB->imageSubresourceMap[image].end()) {
3672        pCB->imageLayoutMap[imgpair].layout = layout;
3673    } else {
3674        // TODO (mlentine): Could be expensive and might need to be removed.
3675        assert(imgpair.hasSubresource);
3676        IMAGE_CMD_BUF_LAYOUT_NODE node;
3677        FindLayout(pCB, image, imgpair.subresource, node);
3678        SetLayout(pCB, image, imgpair, {node.initialLayout, layout});
3679    }
3680}
3681
3682void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3683    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3684    SetLayout(pCB, image, imgpair, node);
3685}
3686
3687void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3688    ImageSubresourcePair imgpair = {image, true, range};
3689    SetLayout(pCB, image, imgpair, node);
3690}
3691
3692void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, const VkImageLayout &layout) {
3693    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3694    SetLayout(pCB, image, imgpair, layout);
3695}
3696
3697void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, const VkImageLayout &layout) {
3698    ImageSubresourcePair imgpair = {image, true, range};
3699    SetLayout(pCB, image, imgpair, layout);
3700}
3701
3702void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3703    auto image_view_data = dev_data->imageViewMap.find(imageView);
3704    assert(image_view_data != dev_data->imageViewMap.end());
3705    const VkImage &image = image_view_data->second.image;
3706    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3707    // TODO: Do not iterate over every possibility - consolidate where possible
3708    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3709        uint32_t level = subRange.baseMipLevel + j;
3710        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3711            uint32_t layer = subRange.baseArrayLayer + k;
3712            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3713            SetLayout(pCB, image, sub, layout);
3714        }
3715    }
3716}
3717
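// Worked example for the expansion above (hypothetical view, for illustration only): a view with
// baseMipLevel = 1, levelCount = 2, baseArrayLayer = 0, layerCount = 2 produces four per-subresource
// SetLayout() calls: (mip 1, layer 0), (mip 1, layer 1), (mip 2, layer 0), (mip 2, layer 1). Later
// barriers or descriptor updates touching any one of those subresources can then be checked against
// the layout recorded here.
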
3718// Verify that given imageView is valid
3719static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3720    VkBool32 skipCall = VK_FALSE;
3721    auto ivIt = my_data->imageViewMap.find(*pImageView);
3722    if (ivIt == my_data->imageViewMap.end()) {
3723        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3724                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3725                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3726                            (uint64_t)*pImageView);
3727    } else {
3728        // Validate that imageLayout is compatible with aspectMask and image format
3729        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3730        VkImage image = ivIt->second.image;
3731        // TODO : Check here in case we have a bad image
3732        VkFormat format = VK_FORMAT_MAX_ENUM;
3733        auto imgIt = my_data->imageMap.find(image);
3734        if (imgIt != my_data->imageMap.end()) {
3735            format = (*imgIt).second.createInfo.format;
3736        } else {
3737            // Also need to check the swapchains.
3738            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3739            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3740                VkSwapchainKHR swapchain = swapchainIt->second;
3741                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3742                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3743                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3744                    format = pswapchain_node->createInfo.imageFormat;
3745                }
3746            }
3747        }
3748        if (format == VK_FORMAT_MAX_ENUM) {
3749            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3750                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3751                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3752                                " in imageView %#" PRIxLEAST64,
3753                                (uint64_t)image, (uint64_t)*pImageView);
3754        } else {
3755            VkBool32 ds = vk_format_is_depth_or_stencil(format);
3756            switch (imageLayout) {
3757            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3758                // Only Color bit must be set
3759                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3760                    skipCall |=
3761                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3762                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3763                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3764                                "and imageView %#" PRIxLEAST64 ""
3765                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3766                                (uint64_t)*pImageView);
3767                }
3768                // format must NOT be DS
3769                if (ds) {
3770                    skipCall |=
3771                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3772                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3773                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3774                                "and imageView %#" PRIxLEAST64 ""
3775                                " but the image format is %s which is not a color format.",
3776                                (uint64_t)*pImageView, string_VkFormat(format));
3777                }
3778                break;
3779            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3780            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3781                // Depth or stencil bit must be set, but both must NOT be set
3782                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3783                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3784                        // both must NOT be set
3785                        skipCall |=
3786                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3787                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3788                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3789                                    " that has both STENCIL and DEPTH aspects set",
3790                                    (uint64_t)*pImageView);
3791                    }
3792                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3793                    // Neither were set
3794                    skipCall |=
3795                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3796                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3797                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3798                                " that does not have STENCIL or DEPTH aspect set.",
3799                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3800                }
3801                // format must be DS
3802                if (!ds) {
3803                    skipCall |=
3804                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3805                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3806                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3807                                " but the image format is %s which is not a depth/stencil format.",
3808                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3809                }
3810                break;
3811            default:
3812                // anything to check for other layouts?
3813                break;
3814            }
3815        }
3816    }
3817    return skipCall;
3818}
3819
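// Illustrative sketch (application side, not part of this layer): a depth-only view that passes the
// checks above for VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL -- exactly one of the
// DEPTH/STENCIL aspect bits, on an image whose format is depth/stencil:
//
//     VkImageViewCreateInfo viewCI = {};
//     viewCI.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
//     viewCI.format = VK_FORMAT_D32_SFLOAT;                            // a depth format
//     viewCI.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;  // not DEPTH | STENCIL
//     viewCI.subresourceRange.levelCount = 1;
//     viewCI.subresourceRange.layerCount = 1;
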
3820// Verify that given bufferView is valid
3821static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3822    VkBool32 skipCall = VK_FALSE;
3823    auto bvIt = my_data->bufferViewMap.find(*pBufferView);
3824    if (bvIt == my_data->bufferViewMap.end()) {
3825        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3826                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3827                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3828                            (uint64_t)*pBufferView);
3829    } else {
3830        // TODO : Any further checks we want to do on the bufferView?
3831    }
3832    return skipCall;
3833}
3834
3835// Verify that given bufferInfo is valid
3836static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3837    VkBool32 skipCall = VK_FALSE;
3838    auto bufIt = my_data->bufferMap.find(pBufferInfo->buffer);
3839    if (bufIt == my_data->bufferMap.end()) {
3840        skipCall |=
3841            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3842                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3843                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3844                    (uint64_t)pBufferInfo->buffer);
3845    } else {
3846        // TODO : Any further checks we want to do on the buffer?
3847    }
3848    return skipCall;
3849}
3850
3851static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3852                                       const VkDescriptorSetLayoutBinding *pLayoutBinding) {
3853    VkBool32 skipCall = VK_FALSE;
3854    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3855    const VkSampler *pSampler = NULL;
3856    VkBool32 immutable = VK_FALSE;
3857    uint32_t i = 0;
3858    // For given update type, verify that update contents are correct
3859    switch (pWDS->descriptorType) {
3860    case VK_DESCRIPTOR_TYPE_SAMPLER:
3861        for (i = 0; i < pWDS->descriptorCount; ++i) {
3862            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3863        }
3864        break;
3865    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3866        for (i = 0; i < pWDS->descriptorCount; ++i) {
3867            if (NULL == pLayoutBinding->pImmutableSamplers) {
3868                pSampler = &(pWDS->pImageInfo[i].sampler);
3869                if (immutable) {
3870                    skipCall |= log_msg(
3871                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3872                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3873                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3874                        ", but previous update(s) from this "
3875                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3876                        "use immutable or non-immutable samplers.",
3877                        i, (uint64_t)*pSampler);
3878                }
3879            } else {
3880                if (i > 0 && !immutable) {
3881                    skipCall |= log_msg(
3882                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3883                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3884                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3885                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3886                        "use immutable or non-immutable samplers.",
3887                        i);
3888                }
3889                immutable = VK_TRUE;
3890                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
3891            }
3892            skipCall |= validateSampler(my_data, pSampler, immutable);
3893        }
3894    // Intentionally fall through here to also validate image stuff
3895    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3896    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3897    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3898        for (i = 0; i < pWDS->descriptorCount; ++i) {
3899            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3900        }
3901        break;
3902    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3903    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3904        for (i = 0; i < pWDS->descriptorCount; ++i) {
3905            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3906        }
3907        break;
3908    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3909    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3910    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3911    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3912        for (i = 0; i < pWDS->descriptorCount; ++i) {
3913            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3914        }
3915        break;
3916    default:
3917        break;
3918    }
3919    return skipCall;
3920}
3921// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3922// func_str is the name of the calling function
3923// Return VK_FALSE if no errors occur
3924// Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
3925VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3926    VkBool32 skip_call = VK_FALSE;
3927    auto set_node = my_data->setMap.find(set);
3928    if (set_node == my_data->setMap.end()) {
3929        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3930                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3931                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3932                             (uint64_t)(set));
3933    } else {
3934        if (set_node->second->in_use.load()) {
3935            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3936                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3937                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3938                                 func_str.c_str(), (uint64_t)(set));
3939        }
3940    }
3941    return skip_call;
3942}
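
// Illustrative timeline (application side, hypothetical handles): a set recorded into a command
// buffer stays in_use until the queue finishes with that buffer, so the check above is satisfied
// by synchronizing before updating or freeing the set:
//
//     vkQueueSubmit(queue, 1, &submitInfo, fence);
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // set is no longer in flight
//     vkUpdateDescriptorSets(device, 1, &wds, 0, NULL);         // now passes validateIdleDescriptorSet()
//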
3943static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
3944    // Flag any CBs this set is bound to as INVALID
3945    for (auto cb : pSet->boundCmdBuffers) {
3946        auto cb_node = dev_data->commandBufferMap.find(cb);
3947        if (cb_node != dev_data->commandBufferMap.end()) {
3948            cb_node->second->state = CB_INVALID;
3949        }
3950    }
3951}
3952// update DS mappings based on write and copy update arrays
3953static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
3954                         uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
3955    VkBool32 skipCall = VK_FALSE;
3956
3957    LAYOUT_NODE *pLayout = NULL;
3958    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
3959    // Validate Write updates
3960    uint32_t i = 0;
3961    for (i = 0; i < descriptorWriteCount; i++) {
3962        VkDescriptorSet ds = pWDS[i].dstSet;
3963        SET_NODE *pSet = my_data->setMap[ds];
3964        // Set being updated cannot be in-flight
3965        if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == VK_TRUE)
3966            return skipCall;
3967        // An unknown set was already reported above; skip it here to avoid dereferencing a NULL node
3968        if (!pSet)
3969            continue;
3970        // If set is bound to any cmdBuffers, mark them invalid
3971        invalidateBoundCmdBuffers(my_data, pSet);
3969        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
3970        pLayout = pSet->pLayout;
3971        // First verify valid update struct
3972        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
3973            break;
3974        }
3975        uint32_t binding = 0, endIndex = 0;
3976        binding = pWDS[i].dstBinding;
3977        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
3978        // Make sure that layout being updated has the binding being updated
3979        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
3980            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3981                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3982                                "Descriptor Set %" PRIu64 " does not have a binding matching "
3983                                "update binding %u for update type "
3984                                "%s!",
3985                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
3986        } else {
3987            // Next verify that update falls within size of given binding
3988            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
3989            if (getBindingEndIndex(pLayout, binding) < endIndex) {
3990                pLayoutCI = &pLayout->createInfo;
3991                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
3992                skipCall |=
3993                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3994                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3995                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
3996                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
3997            } else { // TODO : should we skip update on a type mismatch or force it?
3998                uint32_t startIndex;
3999                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4000                // Layout bindings match w/ update, now verify that update type
4001                // & stageFlags are the same for entire update
4002                if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
4003                    // The update is within bounds and consistent, but need to
4004                    // make sure contents make sense as well
4005                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
4006                                                           &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
4007                        // Update is good. Save the update info
4008                        // Create new update struct for this set's shadow copy
4009                        GENERIC_HEADER *pNewNode = NULL;
4010                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
4011                        if (NULL == pNewNode) {
4012                            skipCall |= log_msg(
4013                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4014                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4015                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
4016                        } else {
4017                            // Insert shadow node into LL of updates for this set
4018                            pNewNode->pNext = pSet->pUpdateStructs;
4019                            pSet->pUpdateStructs = pNewNode;
4020                            // Now update appropriate descriptor(s) to point to new Update node
4021                            for (uint32_t j = startIndex; j <= endIndex; j++) {
4022                                assert(j < pSet->descriptorCount);
4023                                pSet->ppDescriptors[j] = pNewNode;
4024                            }
4025                        }
4026                    }
4027                }
4028            }
4029        }
4030    }
4031    // Now validate copy updates
4032    for (i = 0; i < descriptorCopyCount; ++i) {
4033        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
4034        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
4035        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
4036        // For each copy make sure that update falls within given layout and that types match
4037        pSrcSet = my_data->setMap[pCDS[i].srcSet];
4038        pDstSet = my_data->setMap[pCDS[i].dstSet];
4039        // Set being updated cannot be in-flight (pass the handle itself; pDstSet may be NULL for an unknown set)
4040        if ((skipCall = validateIdleDescriptorSet(my_data, pCDS[i].dstSet, "vkUpdateDescriptorSets")) == VK_TRUE)
4041            return skipCall;
4042        // Skip unknown src/dst sets to avoid dereferencing NULL map entries below
4043        if (!pSrcSet || !pDstSet)
4044            continue;
4045        invalidateBoundCmdBuffers(my_data, pDstSet);
4043        pSrcLayout = pSrcSet->pLayout;
4044        pDstLayout = pDstSet->pLayout;
4045        // Validate that src binding is valid for src set layout
4046        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
4047            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4048                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4049                                "Copy descriptor update %u has srcBinding %u "
4050                                "which is out of bounds for underlying SetLayout "
4051                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4052                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
4053        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
4054            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4055                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4056                                "Copy descriptor update %u has dstBinding %u "
4057                                "which is out of bounds for underlying SetLayout "
4058                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4059                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
4060        } else {
4061            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
4062            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4063                                            (const GENERIC_HEADER *)&(pCDS[i]));
4064            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4065                                            (const GENERIC_HEADER *)&(pCDS[i]));
4066            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
4067                pLayoutCI = &pSrcLayout->createInfo;
4068                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4069                skipCall |=
4070                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4071                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4072                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4073                            pCDS[i].srcBinding, DSstr.c_str());
4074            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
4075                pLayoutCI = &pDstLayout->createInfo;
4076                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4077                skipCall |=
4078                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4079                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4080                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4081                            pCDS[i].dstBinding, DSstr.c_str());
4082            } else {
4083                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4084                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4085                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4086                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4087                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
4088                    // For copy just make sure that the types match and then perform the update
4089                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
4090                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4091                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
4092                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
4093                                            "that does not match overlapping dest descriptor type of %s!",
4094                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
4095                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
4096                    } else {
4097                        // point dst descriptor at corresponding src descriptor
4098                        // TODO : This may be a hole. I believe copy should be its own copy,
4099                        //  otherwise a subsequent write update to src will incorrectly affect the copy
4100                        pDstSet->ppDescriptors[j + dstStartIndex] = pSrcSet->ppDescriptors[j + srcStartIndex];
4101                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
4102                    }
4103                }
4104            }
4105        }
4106    }
4107    return skipCall;
4108}
4109
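// Illustrative copy update (hypothetical handles): as validated above, a copy only has to name
// existing bindings, stay within their bounds, and match descriptor types element-by-element:
//
//     VkCopyDescriptorSet cds = {};
//     cds.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
//     cds.srcSet = srcSet;  cds.srcBinding = 0;  cds.srcArrayElement = 0;
//     cds.dstSet = dstSet;  cds.dstBinding = 2;  cds.dstArrayElement = 0;
//     cds.descriptorCount = 1;
//     vkUpdateDescriptorSets(device, 0, NULL, 1, &cds);
//
// Note the TODO above: the shadow state currently aliases the src descriptors instead of making
// an independent copy.
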
4110// Verify that given pool has descriptors that are being requested for allocation
4111static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
4112                                                         const VkDescriptorSetLayout *pSetLayouts) {
4113    VkBool32 skipCall = VK_FALSE;
4114    uint32_t i = 0, j = 0;
4115    for (i = 0; i < count; ++i) {
4116        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
4117        if (NULL == pLayout) {
4118            skipCall |=
4119                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
4120                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
4121                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
4122                        (uint64_t)pSetLayouts[i]);
4123        } else {
4124            uint32_t typeIndex = 0, poolSizeCount = 0;
4125            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
4126                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
4127                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
4128                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
4129                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4130                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
4131                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4132                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
4133                                        ". This pool only has %u descriptors of this type remaining.",
4134                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
4135                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
4136                } else { // Decrement available descriptors of this type
4137                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4138                }
4139            }
4140        }
4141    }
4142    return skipCall;
4143}
4144
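// Worked example for the availability check above (hypothetical pool, for illustration only):
// a pool created with VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 4} starts with
// availableDescriptorTypeCount[UNIFORM_BUFFER] == 4. Allocating two sets whose layout has one
// UNIFORM_BUFFER binding with descriptorCount = 3 decrements 4 -> 1 for the first set, then fails
// for the second (3 > 1) and reports DRAWSTATE_DESCRIPTOR_POOL_EMPTY.
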
// Free the shadowed update node for this Set
// NOTE : Calls to this function should be wrapped in mutex
static void freeShadowUpdateTree(SET_NODE *pSet) {
    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
    pSet->pUpdateStructs = NULL;
    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
    // Clear the descriptor mappings as they will now be invalid
    memset(pSet->ppDescriptors, 0, pSet->descriptorCount * sizeof(GENERIC_HEADER *));
    while (pShadowUpdate) {
        pFreeUpdate = pShadowUpdate;
        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
        VkWriteDescriptorSet *pWDS = NULL;
        switch (pFreeUpdate->sType) {
        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
            switch (pWDS->descriptorType) {
            case VK_DESCRIPTOR_TYPE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
                delete[] pWDS->pImageInfo;
            } break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
                delete[] pWDS->pTexelBufferView;
            } break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                delete[] pWDS->pBufferInfo;
            } break;
            default:
                break;
            }
            break;
        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
            break;
        default:
            assert(0);
            break;
        }
        delete pFreeUpdate;
    }
}

// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        SET_NODE *pSet = (*ii).second->pSets;
        SET_NODE *pFreeSet = pSet;
        while (pSet) {
            pFreeSet = pSet;
            pSet = pSet->pNext;
            // Freeing layouts handled in deleteLayouts() function
            // Free Update shadow struct tree
            freeShadowUpdateTree(pFreeSet);
            delete[] pFreeSet->ppDescriptors;
            delete pFreeSet;
        }
        delete (*ii).second;
    }
    my_data->descriptorPoolMap.clear();
}

// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
// NOTE : Calls to this function should be wrapped in mutex
static void deleteLayouts(layer_data *my_data) {
    if (my_data->descriptorSetLayoutMap.empty())
        return;
    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
        LAYOUT_NODE *pLayout = (*ii).second;
        if (pLayout->createInfo.pBindings) {
            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
            }
            delete[] pLayout->createInfo.pBindings;
        }
        delete pLayout;
    }
    my_data->descriptorSetLayoutMap.clear();
}

// Clearing a set currently removes all previous updates to that set
//  TODO : Validate if this is correct clearing behavior
static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
    SET_NODE *pSet = getSetNode(my_data, set);
    if (!pSet) {
        // TODO : Return error
    } else {
        freeShadowUpdateTree(pSet);
    }
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
    if (!pPool) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
    } else {
        // TODO: validate flags
        // For every set off of this pool, clear it
        SET_NODE *pSet = pPool->pSets;
        while (pSet) {
            clearDescriptorSet(my_data, pSet->set);
            pSet = pSet->pNext;
        }
        // Reset available count to max count for this pool
        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
        }
    }
}

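// Example (illustrative sketch): resetting a pool hands every descriptor back, so the
// availableDescriptorTypeCount bookkeeping above returns to the pool's creation-time
// budget and a previously exhausted pool can satisfy allocations again. `device` and
// `pool` are assumed to exist:
//
//     vkResetDescriptorPool(device, pool, 0); // all sets allocated from `pool` become
//                                             // invalid; available counts are restored
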
// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
    if (my_data->commandBufferMap.count(cb) == 0) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
        return NULL;
    }
    return my_data->commandBufferMap[cb];
}

// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return VK_FALSE;
    VkBool32 skip_call = VK_FALSE;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}

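// Example (illustrative sketch): the two errors above track the contents argument of
// vkCmdBeginRenderPass(). `primary`, `secondary`, and `rpBegin` are assumed to exist:
//
//     vkCmdBeginRenderPass(primary, &rpBegin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdDraw(primary, 3, 1, 0, 0);               // flagged: only vkCmdExecuteCommands is legal here
//     vkCmdExecuteCommands(primary, 1, &secondary); // OK
//     vkCmdEndRenderPass(primary);
//
//     vkCmdBeginRenderPass(primary, &rpBegin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdExecuteCommands(primary, 1, &secondary); // flagged: this subpass records inline commands
//     vkCmdEndRenderPass(primary);
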
static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

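// Example (illustrative sketch): the VkQueueFlags consulted above come from the queue
// family that the command buffer's pool targets. An application can inspect the same
// flags before choosing a pool; `physDev` is assumed to exist:
//
//     uint32_t familyCount = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(physDev, &familyCount, NULL);
//     std::vector<VkQueueFamilyProperties> families(familyCount);
//     vkGetPhysicalDeviceQueueFamilyProperties(physDev, &familyCount, families.data());
//     bool canDraw = (families[0].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0;
//     // Recording vkCmdDraw* into a buffer whose pool targets a family where
//     // canDraw is false is reported by checkGraphicsBit() above.
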
// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    VkBool32 skipCall = VK_FALSE;
    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
    if (pool_data != my_data->commandPoolMap.end()) {
        VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // init cmd node and append to end of cmd LL
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
    return skipCall;
}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
    if (pCB) {
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewports.clear();
        pCB->scissors.clear();
        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
                auto set_node = my_data->setMap.find(set);
                if (set_node != my_data->setMap.end()) {
                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
                }
            }
            pCB->lastBound[i].reset();
        }
        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = 0;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->framebuffer = 0;
        pCB->fenceId = 0;
        pCB->lastSubmittedFence = VK_NULL_HANDLE;
        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
        pCB->destroyedSets.clear();
        pCB->updatedSets.clear();
        pCB->destroyedFramebuffers.clear();
        pCB->waitedEvents.clear();
        pCB->semaphores.clear();
        pCB->events.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        pCB->secondaryCommandBuffers.clear();
    }
}

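// Example (illustrative sketch, assuming resetCB() is reached from the layer's reset
// and begin hooks): both reset paths require the pool to have been created with
// VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT. `cb` and `beginInfo` are assumed
// to exist:
//
//     vkResetCommandBuffer(cb, VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT); // explicit reset
//     vkBeginCommandBuffer(cb, &beginInfo); // re-beginning a recorded cb resets it implicitly
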
// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
    for (auto const &att : pPipe->attachments) {
        if (0 != att.colorWriteMask) {
            pCB->status |= CBSTATUS_COLOR_BLEND_WRITE_ENABLE;
        }
    }
    if (pPipe->dsStateCI.depthWriteEnable) {
        pCB->status |= CBSTATUS_DEPTH_WRITE_ENABLE;
    }
    if (pPipe->dsStateCI.stencilTestEnable) {
        pCB->status |= CBSTATUS_STENCIL_TEST_ENABLE;
    }
    // Account for any dynamic state not set via this PSO
    if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
        pCB->status = CBSTATUS_ALL;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
        for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
            switch (pPipe->dynStateCI.pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_VIEWPORT:
                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
                break;
            case VK_DYNAMIC_STATE_SCISSOR:
                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
                break;
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}

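// Example (illustrative sketch): a pipeline that lists VK_DYNAMIC_STATE_VIEWPORT keeps
// CBSTATUS_VIEWPORT_SET out of the mask above, so the command buffer must supply the
// viewport itself before drawing. `cb` and `viewport` are assumed to exist:
//
//     VkDynamicState dyn = VK_DYNAMIC_STATE_VIEWPORT;
//     VkPipelineDynamicStateCreateInfo dynCI = {};
//     dynCI.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
//     dynCI.dynamicStateCount = 1;
//     dynCI.pDynamicStates = &dyn;
//     // ... chain dynCI into VkGraphicsPipelineCreateInfo and create the pipeline ...
//     vkCmdSetViewport(cb, 0, 1, &viewport); // sets CBSTATUS_VIEWPORT_SET on the CB
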
// Print the last bound Gfx Pipeline
static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    VkBool32 skipCall = VK_FALSE;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (pPipeTrav) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
        } // else nothing to print
    }
    return skipCall;
}

static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && !pCB->cmds.empty()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
        vector<CMD_NODE> cmds = pCB->cmds;
        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
            // TODO : Need to pass cb as srcObj here
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
        }
    } // else nothing to print
}

static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
    VkBool32 skipCall = VK_FALSE;
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return skipCall;
    }
    skipCall |= printPipeline(my_data, cb);
    return skipCall;
}

// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    VkBool32 inside = VK_FALSE;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
                         (uint64_t)pCB->activeRenderPass);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    VkBool32 outside = VK_FALSE;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
                          "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}

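// Example (illustrative sketch): transfer commands belong outside a render pass and
// draws inside one, which is what the two helpers above enforce. `cb`, `rpBegin`,
// `src`, `dst`, and `region` are assumed to exist:
//
//     vkCmdCopyBuffer(cb, src, dst, 1, &region); // OK: outside render pass
//     vkCmdBeginRenderPass(cb, &rpBegin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdCopyBuffer(cb, src, dst, 1, &region); // flagged by insideRenderPass()
//     vkCmdDraw(cb, 3, 1, 0, 0);                 // OK: inside render pass
//     vkCmdEndRenderPass(cb);
//     vkCmdDraw(cb, 3, 1, 0, 0);                 // flagged by outsideRenderPass()
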
static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");

    if (!globalLockInitialized) {
        loader_platform_thread_create_mutex(&globalLock);
        globalLockInitialized = 1;
    }
#if MTMERGE
    // Zero out memory property data
    memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
#endif
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);

    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);

    init_core_validation(my_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

/* hook DestroyInstance to remove tableInstanceMap entry */
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyInstance(instance, pAllocator);

    loader_platform_thread_lock_mutex(&globalLock);
    // Clean up logging callback, if any
    while (!my_data->logging_callback.empty()) {
        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
        my_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(my_data->report_data);
    delete my_data->instance_dispatch_table;
    layer_data_map.erase(key);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (layer_data_map.empty()) {
        // Release mutex when destroying last instance.
        loader_platform_thread_delete_mutex(&globalLock);
        globalLockInitialized = 0;
    }
}

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;

    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
    }
}

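// Example (illustrative sketch): wsi_enabled above is only set when the application
// requests the swapchain extension at device-creation time. `deviceCI` is assumed to
// be an otherwise filled-in VkDeviceCreateInfo:
//
//     const char *extNames[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
//     deviceCI.enabledExtensionCount = 1;
//     deviceCI.ppEnabledExtensionNames = extNames;
//     // After vkCreateDevice() succeeds, the swapchain entry points hooked above
//     // (vkCreateSwapchainKHR, vkQueuePresentKHR, ...) become meaningful to track.
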
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    loader_platform_thread_lock_mutex(&globalLock);
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    // Setup device dispatch table
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
    uint32_t count;
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->physDevProperties.queue_family_properties.resize(count);
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    loader_platform_thread_unlock_mutex(&globalLock);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

// prototype
static void deleteRenderPasses(layer_data *);
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    loader_platform_thread_lock_mutex(&globalLock);
    deletePipelines(dev_data);
    deleteRenderPasses(dev_data);
    deleteCommandBuffers(dev_data);
    deletePools(dev_data);
    deleteLayouts(dev_data);
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
#if MTMERGE
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
    print_mem_list(dev_data, device);
    printCBList(dev_data, device);
    delete_cmd_buf_info_list(dev_data);
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (!dev_data->memObjMap.empty()) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = &(*ii).second;
            if (pInfo->allocInfo.allocationSize != 0) {
                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    // Queues persist until device is destroyed
    delete_queue_info_list(dev_data);
    layer_debug_report_destroy_device(device);
    loader_platform_thread_unlock_mutex(&globalLock);

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: %p, key: %p\n", device, key);
#endif
    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    if (VK_FALSE == skipCall) {
        pDisp->DestroyDevice(device, pAllocator);
    }
#else
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
#endif
    delete dev_data->device_dispatch_table;
    layer_data_map.erase(key);
}

#if MTMERGE
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
}
#endif

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
}

// TODO: Why does this exist - can we just use global?
static const VkLayerProperties cv_device_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_API_VERSION, 1, "LunarG Validation Layer",
}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName == NULL) {
        dispatch_key key = get_dispatch_key(physicalDevice);
        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
    } else {
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    /* draw_state physical device layers are the same as global */
    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
}

// Validate that the initial layout recorded for each image in the command buffer
// matches the current global layout of that image
VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT,
                                     "DS", "Cannot submit cmd buffer using image with layout %s when "
                                           "first use is %s.",
                                     string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}

// Track which resources are in-flight by atomically incrementing their "in_use" count
VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
    VkBool32 skip_call = VK_FALSE;
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data == my_data->bufferMap.end()) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
            } else {
                buffer_data->second.in_use.fetch_add(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            auto setNode = my_data->setMap.find(set);
            if (setNode == my_data->setMap.end()) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
            } else {
                setNode->second->in_use.fetch_add(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode == my_data->semaphoreMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
        } else {
            semaphoreNode->second.in_use.fetch_add(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode == my_data->eventMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
        } else {
            eventNode->second.in_use.fetch_add(1);
        }
    }
    return skip_call;
}

void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data != my_data->bufferMap.end()) {
                buffer_data->second.in_use.fetch_sub(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            auto setNode = my_data->setMap.find(set);
            if (setNode != my_data->setMap.end()) {
                setNode->second->in_use.fetch_sub(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode != my_data->semaphoreMap.end()) {
            semaphoreNode->second.in_use.fetch_sub(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode != my_data->eventMap.end()) {
            eventNode->second.in_use.fetch_sub(1);
        }
    }
    for (auto queryStatePair : pCB->queryToStateMap) {
        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
    }
    for (auto eventStagePair : pCB->eventToStageMap) {
        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
    }
}

void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto fence_data = my_data->fenceMap.find(pFences[i]);
        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
            continue; // skip unknown or already-signaled fences rather than abandoning the rest
        fence_data->second.needsSignaled = false;
        fence_data->second.in_use.fetch_sub(1);
        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
            decrementResources(my_data, cmdBuffer);
        }
    }
}

void decrementResources(layer_data *my_data, VkQueue queue) {
    auto queue_data = my_data->queueMap.find(queue);
    if (queue_data != my_data->queueMap.end()) {
        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
            decrementResources(my_data, cmdBuffer);
        }
        queue_data->second.untrackedCmdBuffers.clear();
        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
    }
}

void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
    if (queue == other_queue) {
        return;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    auto other_queue_data = dev_data->queueMap.find(other_queue);
    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
        return;
    }
    for (auto other_fence : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(other_fence);
    }
    if (fence != VK_NULL_HANDLE) {
        auto fence_data = dev_data->fenceMap.find(fence);
        if (fence_data == dev_data->fenceMap.end()) {
            return;
        }
        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
            fence_data->second.cmdBuffers.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedCmdBuffers.clear();
    } else {
        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedCmdBuffers.clear();
    }
}

void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    auto queue_data = my_data->queueMap.find(queue);
    if (fence != VK_NULL_HANDLE) {
        vector<VkFence> prior_fences;
        auto fence_data = my_data->fenceMap.find(fence);
        if (fence_data == my_data->fenceMap.end()) {
            return;
        }
        if (queue_data != my_data->queueMap.end()) {
            prior_fences = queue_data->second.lastFences;
            queue_data->second.lastFences.clear();
            queue_data->second.lastFences.push_back(fence);
            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
                fence_data->second.cmdBuffers.push_back(cmdbuffer);
            }
            queue_data->second.untrackedCmdBuffers.clear();
        }
        fence_data->second.cmdBuffers.clear();
        fence_data->second.priorFences = prior_fences;
        fence_data->second.needsSignaled = true;
        fence_data->second.queue = queue;
        fence_data->second.in_use.fetch_add(1);
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
                }
                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
            }
        }
    } else {
        if (queue_data != my_data->queueMap.end()) {
            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
                const VkSubmitInfo *submit = &pSubmits[submit_idx];
                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
                    }
                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
                }
            }
        }
    }
    if (queue_data != my_data->queueMap.end()) {
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                // Add cmdBuffers to both the global set and queue set
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
                }
                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
            }
        }
    }
}

bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Command Buffer %#" PRIx64 " is already in use and is not marked "
                                                             "for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}

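// Example (illustrative sketch): re-submitting an in-flight command buffer needs the
// simultaneous-use flag at begin time. `cb`, `queue`, and `submitInfo` (whose
// pCommandBuffers points at `cb`) are assumed to exist:
//
//     VkCommandBufferBeginInfo beginInfo = {};
//     beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
//     vkBeginCommandBuffer(cb, &beginInfo);
//     // ... record commands ...
//     vkEndCommandBuffer(cb);
//     vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);
//     vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE); // OK only because of the flag
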
static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skipCall = false;
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            bool causeReported = false;
            if (!pCB->destroyedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->destroyedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->updatedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->updatedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->destroyedFramebuffers.empty()) {
                std::stringstream fb_string;
                for (auto fb : pCB->destroyedFramebuffers)
                    fb_string << " " << fb;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
                            "referenced framebuffers destroyed: %s",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
                causeReported = true;
            }
            // TODO : This is defensive programming to make sure an error is
            //  flagged if we hit this INVALID cmd buffer case and none of the
            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
            if (!causeReported) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should be improved to report the exact cause.",
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skipCall;
}

static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Track in-use for resources off of primary and any secondary CBs
    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
                            " but that buffer has subsequently been bound to "
                            "primary cmd buffer %#" PRIxLEAST64 ".",
                            reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                            reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
            }
        }
    }
    // TODO : Verify if this also needs to be checked for secondary command
    //  buffers. If so, this block of code can move to
    //   validateCommandBufferState() function. vulkan GL106 filed to clarify
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted %" PRIu64 " times.",
                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
    }
    skipCall |= validateCommandBufferState(dev_data, pCB);
    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
    // on device
    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
    return skipCall;
}

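// Example (illustrative sketch): the ONE_TIME_SUBMIT check above fires on the second
// submission of a command buffer begun with that flag. `cb`, `queue`, and `submitInfo`
// are assumed to exist:
//
//     VkCommandBufferBeginInfo beginInfo = {};
//     beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
//     vkBeginCommandBuffer(cb, &beginInfo);
//     // ... record commands ...
//     vkEndCommandBuffer(cb);
//     vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE); // OK: submitCount == 1
//     vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE); // flagged: submitCount > 1
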
5197VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5198vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5199    VkBool32 skipCall = VK_FALSE;
5200    GLOBAL_CB_NODE *pCBNode = NULL;
5201    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5202    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5203    loader_platform_thread_lock_mutex(&globalLock);
5204#if MTMERGE
5205    // TODO : Need to track fence and clear mem references when fence clears
5206    // MTMTODO : Merge this code with code below to avoid duplicating efforts
5207    uint64_t fenceId = 0;
5208    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);
5209
5210    print_mem_list(dev_data, queue);
5211    printCBList(dev_data, queue);
5212    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5213        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5214        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5215            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5216            if (pCBNode) {
5217                pCBNode->fenceId = fenceId;
5218                pCBNode->lastSubmittedFence = fence;
5219                pCBNode->lastSubmittedQueue = queue;
5220                for (auto &function : pCBNode->validate_functions) {
5221                    skipCall |= function();
5222                }
5223            }
5224        }
5225
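        // MEMTRACK semaphore state machine: signaling moves a semaphore from
        // UNSET to SIGNALLED, waiting moves it from SIGNALLED to WAIT, and the
        // post-submit pass at the end of this function returns waited-on
        // semaphores to UNSET. Any other transition is reported below.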
5226        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5227            VkSemaphore sem = submit->pWaitSemaphores[i];
5228
5229            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5230                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
5235                }
5236                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
5237            }
5238        }
5239        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
5240            VkSemaphore sem = submit->pSignalSemaphores[i];
5241
5242            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5243                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
                                        "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
5247                }
5248                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
5249            }
5250        }
5251    }
5252#endif
5253    // First verify that fence is not in use
5254    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5255        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5256                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5257                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5258    }
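    // Semaphore forward-progress rules enforced below: a wait is only legal on a
    // semaphore with a pending signal, and a signal is only legal on a semaphore
    // whose previous signal has been consumed. E.g. (illustrative only) two
    // submits that signal the same semaphore with no intervening wait trigger
    // DRAWSTATE_QUEUE_FORWARD_PROGRESS.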
5259    // Now verify each individual submit
5260    std::unordered_set<VkQueue> processed_other_queues;
5261    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5262        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5263        vector<VkSemaphore> semaphoreList;
5264        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5265            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5266            semaphoreList.push_back(semaphore);
5267            if (dev_data->semaphoreMap[semaphore].signaled) {
5268                dev_data->semaphoreMap[semaphore].signaled = 0;
5269            } else {
5270                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5271                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5272                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5273                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5274            }
5275            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5276            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5277                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5278                processed_other_queues.insert(other_queue);
5279            }
5280        }
5281        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5282            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5283            semaphoreList.push_back(semaphore);
5284            if (dev_data->semaphoreMap[semaphore].signaled) {
5285                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5286                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5287                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5288                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5289                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5290                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5291            } else {
5292                dev_data->semaphoreMap[semaphore].signaled = 1;
5293                dev_data->semaphoreMap[semaphore].queue = queue;
5294            }
5295        }
5296        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5297            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5298            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5299            pCBNode->semaphores = semaphoreList;
5300            pCBNode->submitCount++; // increment submit count
5301            skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5302        }
5303    }
5304    // Update cmdBuffer-related data structs and mark fence in-use
5305    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5306    loader_platform_thread_unlock_mutex(&globalLock);
5307    if (VK_FALSE == skipCall)
5308        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5309#if MTMERGE
5310    loader_platform_thread_lock_mutex(&globalLock);
5311    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5312        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5313        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5314            VkSemaphore sem = submit->pWaitSemaphores[i];
5315
5316            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5317                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
5318            }
5319        }
5320    }
5321    loader_platform_thread_unlock_mutex(&globalLock);
5322#endif
5323    return result;
5324}
5325
5326#if MTMERGE
5327VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5328                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5329    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5330    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5331    // TODO : Track allocations and overall size here
5332    loader_platform_thread_lock_mutex(&globalLock);
5333    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5334    print_mem_list(my_data, device);
5335    loader_platform_thread_unlock_mutex(&globalLock);
5336    return result;
5337}
5338
5339VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5340vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5341    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5342
5343    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5344    // Before freeing a memory object, an application must ensure the memory object is no longer
5345    // in use by the device—for example by command buffers queued for execution. The memory need
5346    // not yet be unbound from all images and buffers, but any further use of those images or
5347    // buffers (on host or device) for anything other than destroying those objects will result in
5348    // undefined behavior.
5349
5350    loader_platform_thread_lock_mutex(&globalLock);
5351    freeMemObjInfo(my_data, device, mem, VK_FALSE);
5352    print_mem_list(my_data, device);
5353    printCBList(my_data, device);
5354    loader_platform_thread_unlock_mutex(&globalLock);
5355    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5356}
5357
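// Validate a vkMapMemory request against tracked allocation state: warn on
// zero-size maps, report mapping an already-mapped object, and report ranges
// that fall outside the allocation. Illustrative out-of-range case (sketch
// only, assuming an allocation of 256 bytes):
//
//     vkMapMemory(device, mem, 192 /*offset*/, 128 /*size*/, 0, &pData); // 192 + 128 > 256 -> error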
5358VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5359    VkBool32 skipCall = VK_FALSE;
5360
5361    if (size == 0) {
5362        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5363        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5364                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5365                           "VkMapMemory: Attempting to map memory range of size zero");
5366    }
5367
5368    auto mem_element = my_data->memObjMap.find(mem);
5369    if (mem_element != my_data->memObjMap.end()) {
5370        // It is an application error to call VkMapMemory on an object that is already mapped
5371        if (mem_element->second.memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5375        }
5376
5377        // Validate that offset + size is within object's allocationSize
5378        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping memory with VK_WHOLE_SIZE at offset %" PRIu64
                                    " which is not less than the total allocation size %" PRIu64,
                                    offset, mem_element->second.allocInfo.allocationSize);
5384            }
5385        } else {
            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping memory from offset %" PRIu64 " to %" PRIu64
                                    " exceeds the total allocation size %" PRIu64,
                                    offset, offset + size, mem_element->second.allocInfo.allocationSize);
5391            }
5392        }
5393    }
5394    return skipCall;
5395}
5396
5397void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5398    auto mem_element = my_data->memObjMap.find(mem);
5399    if (mem_element != my_data->memObjMap.end()) {
5400        MemRange new_range;
5401        new_range.offset = offset;
5402        new_range.size = size;
5403        mem_element->second.memRange = new_range;
5404    }
5405}
5406
5407VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5408    VkBool32 skipCall = VK_FALSE;
5409    auto mem_element = my_data->memObjMap.find(mem);
5410    if (mem_element != my_data->memObjMap.end()) {
5411        if (!mem_element->second.memRange.size) {
5412            // Valid Usage: memory must currently be mapped
5413            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5414                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5415                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5416        }
5417        mem_element->second.memRange.size = 0;
5418        if (mem_element->second.pData) {
5419            free(mem_element->second.pData);
5420            mem_element->second.pData = 0;
5421        }
5422    }
5423    return skipCall;
5424}
5425
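// For non-coherent memory, initializeAndTrackMemory() below hands the app a
// pointer into the middle of a double-sized shadow allocation that has been
// pre-filled with this byte, leaving guard bands on either side of the mapped
// range so out-of-bounds writes can later be detected by checking whether the
// fill pattern survived.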
5426static char NoncoherentMemoryFillValue = 0xb;
5427
5428void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5429    auto mem_element = my_data->memObjMap.find(mem);
5430    if (mem_element != my_data->memObjMap.end()) {
5431        mem_element->second.pDriverData = *ppData;
5432        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5433        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5434            mem_element->second.pData = 0;
5435        } else {
5436            if (size == VK_WHOLE_SIZE) {
5437                size = mem_element->second.allocInfo.allocationSize;
5438            }
5439            size_t convSize = (size_t)(size);
5440            mem_element->second.pData = malloc(2 * convSize);
5441            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5442            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5443        }
5444    }
5445}
5446#endif
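// Check a retiring command buffer for queries that were reset behind a
// vkCmdWaitEvents whose guarding events never reached the signaled state; the
// results of such queries were never valid, so an error is reported.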
5447// Note: This function assumes that the global lock is held by the calling
5448// thread.
5449VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5450    VkBool32 skip_call = VK_FALSE;
5451    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5452    if (pCB) {
5453        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
5454            for (auto event : queryEventsPair.second) {
5455                if (my_data->eventMap[event].needsSignaled) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool %" PRIu64
                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
5461                }
5462            }
5463        }
5464    }
5465    return skip_call;
5466}
5467// Remove given cmd_buffer from the global inFlight set.
//  Also, if the given queue is valid, remove the cmd_buffer from that queue's
//  inFlightCmdBuffers set. Finally, check all other queues; if the given cmd_buffer
5470//  is still in flight on another queue, add it back into the global set.
5471// Note: This function assumes that the global lock is held by the calling
5472// thread.
5473static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
5474    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
5475    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5476    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5477        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
5478        for (auto q : dev_data->queues) {
5479            if ((q != queue) &&
5480                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
5481                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
5482                break;
5483            }
5484        }
5485    }
5486}
5487#if MTMERGE
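// Flag suspicious fence status queries: an INFO message if the fence was created
// pre-signaled (its first status check after creation is expected to succeed),
// and a warning if the fence has never been submitted on a queue or used to
// acquire a swapchain image.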
5488static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
5489    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5490    VkBool32 skipCall = false;
5491    auto pFenceInfo = my_data->fenceMap.find(fence);
5492    if (pFenceInfo != my_data->fenceMap.end()) {
        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
            // firstTimeFlag is already known to be false here, so only the create flag matters
            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
5496                skipCall |=
5497                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5498                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5499                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5500            }
5501            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5502                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5503                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5504                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
5505                                    "acquire next image.",
5506                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5507            }
5508        } else {
5509            pFenceInfo->second.firstTimeFlag = VK_FALSE;
5510        }
5511    }
5512    return skipCall;
5513}
5514#endif
5515VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5516vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5517    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5518    VkBool32 skip_call = VK_FALSE;
5519#if MTMERGE
5520    // Verify fence status of submitted fences
5521    loader_platform_thread_lock_mutex(&globalLock);
5522    for (uint32_t i = 0; i < fenceCount; i++) {
5523        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
5524    }
5525    loader_platform_thread_unlock_mutex(&globalLock);
5526    if (skip_call)
5527        return VK_ERROR_VALIDATION_FAILED_EXT;
5528#endif
5529    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5530
5531    if (result == VK_SUCCESS) {
5532        loader_platform_thread_lock_mutex(&globalLock);
5533        // When we know that all fences are complete we can clean/remove their CBs
5534        if (waitAll || fenceCount == 1) {
5535            for (uint32_t i = 0; i < fenceCount; ++i) {
5536#if MTMERGE
5537                update_fence_tracking(dev_data, pFences[i]);
5538#endif
5539                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
5540                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
5541                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5542                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5543                }
5544            }
5545            decrementResources(dev_data, fenceCount, pFences);
5546        }
        // NOTE : The alternate case, where only some fences have completed, is not
        //  handled here. In that case, for the app to determine which fences completed,
        //  it must call vkGetFenceStatus(), at which point we clean/remove their CBs.
5550        loader_platform_thread_unlock_mutex(&globalLock);
5551    }
5552    if (VK_FALSE != skip_call)
5553        return VK_ERROR_VALIDATION_FAILED_EXT;
5554    return result;
5555}
5556
5557VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5558    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5559    bool skipCall = false;
5560    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5561#if MTMERGE
5562    loader_platform_thread_lock_mutex(&globalLock);
5563    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
5564    loader_platform_thread_unlock_mutex(&globalLock);
5565    if (skipCall)
5566        return result;
5567#endif
5568    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5569    VkBool32 skip_call = VK_FALSE;
5570    loader_platform_thread_lock_mutex(&globalLock);
5571    if (result == VK_SUCCESS) {
5572#if MTMERGE
5573        update_fence_tracking(dev_data, fence);
5574#endif
5575        auto fence_queue = dev_data->fenceMap[fence].queue;
5576        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
5577            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5578            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5579        }
5580        decrementResources(dev_data, 1, &fence);
5581    }
5582    loader_platform_thread_unlock_mutex(&globalLock);
5583    if (VK_FALSE != skip_call)
5584        return VK_ERROR_VALIDATION_FAILED_EXT;
5585    return result;
5586}
5587
5588VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5589vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
5590    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5591    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5592    loader_platform_thread_lock_mutex(&globalLock);
5593    dev_data->queues.push_back(*pQueue);
5594    QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5595    pQNode->device = device;
5596#if MTMERGE
5597    pQNode->lastRetiredId = 0;
5598    pQNode->lastSubmittedId = 0;
5599#endif
5600    loader_platform_thread_unlock_mutex(&globalLock);
5601}
5602
5603VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
5604    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5605    decrementResources(dev_data, queue);
5606    VkBool32 skip_call = VK_FALSE;
5607    loader_platform_thread_lock_mutex(&globalLock);
    // Iterate over a local copy of the set, since removeInFlightCmdBuffer() erases members as we go
5609    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
5610    for (auto cmdBuffer : local_cb_set) {
5611        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5612        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
5613    }
5614    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5615    loader_platform_thread_unlock_mutex(&globalLock);
5616    if (VK_FALSE != skip_call)
5617        return VK_ERROR_VALIDATION_FAILED_EXT;
5618    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
5619#if MTMERGE
5620    if (VK_SUCCESS == result) {
5621        loader_platform_thread_lock_mutex(&globalLock);
5622        retire_queue_fences(dev_data, queue);
5623        loader_platform_thread_unlock_mutex(&globalLock);
5624    }
5625#endif
5626    return result;
5627}
5628
5629VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5630    VkBool32 skip_call = VK_FALSE;
5631    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5632    loader_platform_thread_lock_mutex(&globalLock);
5633    for (auto queue : dev_data->queues) {
5634        decrementResources(dev_data, queue);
5635        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5636            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
5637            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5638        }
5639    }
5640    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5641        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5642    }
5643    dev_data->globalInFlightCmdBuffers.clear();
5644    loader_platform_thread_unlock_mutex(&globalLock);
5645    if (VK_FALSE != skip_call)
5646        return VK_ERROR_VALIDATION_FAILED_EXT;
5647    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5648#if MTMERGE
5649    if (VK_SUCCESS == result) {
5650        loader_platform_thread_lock_mutex(&globalLock);
5651        retire_device_fences(dev_data, device);
5652        loader_platform_thread_unlock_mutex(&globalLock);
5653    }
5654#endif
5655    return result;
5656}
5657
5658VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5659    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5660    bool skipCall = false;
5661    loader_platform_thread_lock_mutex(&globalLock);
5662    if (dev_data->fenceMap[fence].in_use.load()) {
5663        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5664                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5665                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
5666    }
5667#if MTMERGE
5668    delete_fence_info(dev_data, fence);
5669    auto item = dev_data->fenceMap.find(fence);
5670    if (item != dev_data->fenceMap.end()) {
5671        dev_data->fenceMap.erase(item);
5672    }
5673#endif
5674    loader_platform_thread_unlock_mutex(&globalLock);
5675    if (!skipCall)
5676        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5677}
5678
5679VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5680vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5681    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5682    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5683    loader_platform_thread_lock_mutex(&globalLock);
5684    auto item = dev_data->semaphoreMap.find(semaphore);
5685    if (item != dev_data->semaphoreMap.end()) {
5686        if (item->second.in_use.load()) {
5687            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5688                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5689                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5690        }
5691        dev_data->semaphoreMap.erase(semaphore);
5692    }
5693    loader_platform_thread_unlock_mutex(&globalLock);
5694    // TODO : Clean up any internal data structures using this obj.
5695}
5696
5697VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5698    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5699    bool skip_call = false;
5700    loader_platform_thread_lock_mutex(&globalLock);
5701    auto event_data = dev_data->eventMap.find(event);
5702    if (event_data != dev_data->eventMap.end()) {
5703        if (event_data->second.in_use.load()) {
5704            skip_call |= log_msg(
5705                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5706                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5707                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5708        }
5709        dev_data->eventMap.erase(event_data);
5710    }
5711    loader_platform_thread_unlock_mutex(&globalLock);
5712    if (!skip_call)
5713        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5714    // TODO : Clean up any internal data structures using this obj.
5715}
5716
5717VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5718vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5719    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5720        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5721    // TODO : Clean up any internal data structures using this obj.
5722}
5723
5724VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5725                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5726                                                     VkQueryResultFlags flags) {
5727    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5728    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5729    GLOBAL_CB_NODE *pCB = nullptr;
5730    loader_platform_thread_lock_mutex(&globalLock);
5731    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5732        pCB = getCBNode(dev_data, cmdBuffer);
5733        for (auto queryStatePair : pCB->queryToStateMap) {
5734            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5735        }
5736    }
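    // Classify each requested query. Available-and-in-flight is legal only when
    // guarded by events that will be signaled; unavailable-and-in-flight requires
    // VK_QUERY_RESULT_WAIT_BIT or VK_QUERY_RESULT_PARTIAL_BIT; plain unavailable
    // and never-written queries are errors.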
5737    VkBool32 skip_call = VK_FALSE;
5738    for (uint32_t i = 0; i < queryCount; ++i) {
5739        QueryObject query = {queryPool, firstQuery + i};
5740        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5744        // Available and in flight
5745        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5746            queryToStateElement->second) {
5747            for (auto cmdBuffer : queryElement->second) {
5748                pCB = getCBNode(dev_data, cmdBuffer);
5749                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5750                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5751                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5752                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5753                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5754                                         (uint64_t)(queryPool), firstQuery + i);
5755                } else {
5756                    for (auto event : queryEventElement->second) {
5757                        dev_data->eventMap[event].needsSignaled = true;
5758                    }
5759                }
5760            }
5761            // Unavailable and in flight
5762        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5763                   !queryToStateElement->second) {
5764            // TODO : Can there be the same query in use by multiple command buffers in flight?
5765            bool make_available = false;
5766            for (auto cmdBuffer : queryElement->second) {
5767                pCB = getCBNode(dev_data, cmdBuffer);
5768                make_available |= pCB->queryToStateMap[query];
5769            }
5770            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5771                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5772                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5773                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5774                                     (uint64_t)(queryPool), firstQuery + i);
5775            }
5776            // Unavailable
5777        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5778            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5779                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5780                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5781                                 (uint64_t)(queryPool), firstQuery + i);
            // Uninitialized
5783        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5784            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5785                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5786                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is uninitialized.",
5787                                 (uint64_t)(queryPool), firstQuery + i);
5788        }
5789    }
5790    loader_platform_thread_unlock_mutex(&globalLock);
5791    if (skip_call)
5792        return VK_ERROR_VALIDATION_FAILED_EXT;
5793    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5794                                                                flags);
5795}
5796
5797VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5798    VkBool32 skip_call = VK_FALSE;
5799    auto buffer_data = my_data->bufferMap.find(buffer);
5800    if (buffer_data == my_data->bufferMap.end()) {
5801        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5802                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5803                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5804    } else {
5805        if (buffer_data->second.in_use.load()) {
5806            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5807                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5808                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5809        }
5810    }
5811    return skip_call;
5812}
5813
5814VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5815vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5816    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5817    VkBool32 skipCall = VK_FALSE;
5818    loader_platform_thread_lock_mutex(&globalLock);
5819#if MTMERGE
5820    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
5821    if (item != dev_data->bufferBindingMap.end()) {
5822        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5823        dev_data->bufferBindingMap.erase(item);
5824    }
5825#endif
5826    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
5827        loader_platform_thread_unlock_mutex(&globalLock);
5828        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5829        loader_platform_thread_lock_mutex(&globalLock);
5830    }
5831    dev_data->bufferMap.erase(buffer);
5832    loader_platform_thread_unlock_mutex(&globalLock);
5833}
5834
5835VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5836vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5837    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5838    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5839    loader_platform_thread_lock_mutex(&globalLock);
5840    auto item = dev_data->bufferViewMap.find(bufferView);
5841    if (item != dev_data->bufferViewMap.end()) {
5842        dev_data->bufferViewMap.erase(item);
5843    }
5844    loader_platform_thread_unlock_mutex(&globalLock);
5845}
5846
5847VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5848    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5849    VkBool32 skipCall = VK_FALSE;
5850#if MTMERGE
5851    loader_platform_thread_lock_mutex(&globalLock);
5852    auto item = dev_data->imageBindingMap.find((uint64_t)image);
5853    if (item != dev_data->imageBindingMap.end()) {
5854        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5855        dev_data->imageBindingMap.erase(item);
5856    }
5857    loader_platform_thread_unlock_mutex(&globalLock);
5858#endif
5859    if (VK_FALSE == skipCall)
5860        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5861
5862    loader_platform_thread_lock_mutex(&globalLock);
5863    const auto& entry = dev_data->imageMap.find(image);
5864    if (entry != dev_data->imageMap.end()) {
5865        // Clear any memory mapping for this image
5866        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
5867        if (mem_entry != dev_data->memObjMap.end())
5868            mem_entry->second.image = VK_NULL_HANDLE;
5869
5870        // Remove image from imageMap
5871        dev_data->imageMap.erase(entry);
5872    }
5873    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5874    if (subEntry != dev_data->imageSubresourceMap.end()) {
5875        for (const auto& pair : subEntry->second) {
5876            dev_data->imageLayoutMap.erase(pair);
5877        }
5878        dev_data->imageSubresourceMap.erase(subEntry);
5879    }
5880    loader_platform_thread_unlock_mutex(&globalLock);
5881}
5882#if MTMERGE
5883VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5884                                  VkDebugReportObjectTypeEXT object_type) {
5885    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
                       other_handle);
5889    } else {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
                       other_handle);
5893    }
5894}
5895
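// Two ranges alias if they overlap after both endpoints are rounded down to
// bufferImageGranularity. E.g. with granularity 0x400 (illustrative values), a
// buffer at [0x000, 0x3FF] and an image at [0x400, 0x7FF] do not alias, but an
// image starting at 0x3F0 rounds down into the buffer's granule and is reported.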
5896VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5897                               VkDebugReportObjectTypeEXT object_type) {
5898    VkBool32 skip_call = false;
5899
5900    for (auto range : ranges) {
5901        if ((range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) <
5902            (new_range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5903            continue;
5904        if ((range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) >
5905            (new_range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5906            continue;
5907        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5908    }
5909    return skip_call;
5910}
5911
5912VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5913                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
5914                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
5915    MEMORY_RANGE range;
5916    range.handle = handle;
5917    range.memory = mem;
5918    range.start = memoryOffset;
5919    range.end = memoryOffset + memRequirements.size - 1;
5920    ranges.push_back(range);
5921    return validate_memory_range(dev_data, other_ranges, range, object_type);
5922}
5923
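// vkBindBufferMemory: record the buffer-to-memory binding, then register the
// buffer's [memoryOffset, memoryOffset + requirements.size) range and report any
// overlap with image ranges already bound to the same VkDeviceMemory.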
5924VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5925vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5926    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5927    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5928    loader_platform_thread_lock_mutex(&globalLock);
5929    // Track objects tied to memory
5930    uint64_t buffer_handle = (uint64_t)(buffer);
5931    VkBool32 skipCall =
5932        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5933    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
5934    {
5935        VkMemoryRequirements memRequirements;
5936        // MTMTODO : Shouldn't this call down the chain?
5937        vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
5938        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
5939                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
5940                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5941    }
5942    print_mem_list(dev_data, device);
5943    loader_platform_thread_unlock_mutex(&globalLock);
5944    if (VK_FALSE == skipCall) {
5945        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5946    }
5947    return result;
5948}
5949
5950VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5951vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5952    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5953    // TODO : What to track here?
5954    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5955    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5956}
5957
5958VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5959vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5960    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5961    // TODO : What to track here?
5962    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5963    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5964}
5965#endif
5966VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5967vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5968    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5969        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5970    // TODO : Clean up any internal data structures using this obj.
5971}
5972
5973VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5974vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5975    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5976
5977    loader_platform_thread_lock_mutex(&globalLock);
5978
5979    my_data->shaderModuleMap.erase(shaderModule);
5980
5981    loader_platform_thread_unlock_mutex(&globalLock);
5982
5983    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5984}
5985
5986VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5987vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5988    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5989    // TODO : Clean up any internal data structures using this obj.
5990}
5991
5992VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5993vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5994    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5995        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5996    // TODO : Clean up any internal data structures using this obj.
5997}
5998
5999VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6000vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6001    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
6002    // TODO : Clean up any internal data structures using this obj.
6003}
6004
6005VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6006vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6007    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6008        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6009    // TODO : Clean up any internal data structures using this obj.
6010}
6011
6012VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6013vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6014    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6015        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
6016    // TODO : Clean up any internal data structures using this obj.
6017}
6018
6019VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                     const VkCommandBuffer *pCommandBuffers) {
6021    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6022
6023    bool skip_call = false;
6024    loader_platform_thread_lock_mutex(&globalLock);
6025    for (uint32_t i = 0; i < commandBufferCount; i++) {
6026        clear_cmd_buf_and_mem_references(dev_data, pCommandBuffers[i]);
6027        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
6028            skip_call |=
6029                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6030                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6031                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
6032                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
6033        }
6034        // Delete CB information structure, and remove from commandBufferMap
6035        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
6036        if (cb != dev_data->commandBufferMap.end()) {
6037            // reset prior to delete for data clean-up
6038            resetCB(dev_data, (*cb).second->commandBuffer);
6039            delete (*cb).second;
6040            dev_data->commandBufferMap.erase(cb);
6041        }
6042
6043        // Remove commandBuffer reference from commandPoolMap
6044        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
6045    }
6046#if MTMERGE
6047    printCBList(dev_data, device);
6048#endif
6049    loader_platform_thread_unlock_mutex(&globalLock);
6050
6051    if (!skip_call)
6052        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6053}
6054
6055VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6056                                                                   const VkAllocationCallbacks *pAllocator,
6057                                                                   VkCommandPool *pCommandPool) {
6058    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6059
6060    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6061
6062    if (VK_SUCCESS == result) {
6063        loader_platform_thread_lock_mutex(&globalLock);
6064        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6065        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6066        loader_platform_thread_unlock_mutex(&globalLock);
6067    }
6068    return result;
6069}
6070
6071VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6072                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6073
6074    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6075    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6076    if (result == VK_SUCCESS) {
6077        loader_platform_thread_lock_mutex(&globalLock);
6078        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
6079        loader_platform_thread_unlock_mutex(&globalLock);
6080    }
6081    return result;
6082}
6083
6084VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
6085    VkBool32 skipCall = VK_FALSE;
6086    auto pool_data = dev_data->commandPoolMap.find(commandPool);
6087    if (pool_data != dev_data->commandPoolMap.end()) {
6088        for (auto cmdBuffer : pool_data->second.commandBuffers) {
6089            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
6090                skipCall |=
6091                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
6092                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
6093                            "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
6094                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
6095            }
6096        }
6097    }
6098    return skipCall;
6099}
6100
6101// Destroy commandPool along with all of the commandBuffers allocated from that pool
6102VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6103vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6104    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6105    bool commandBufferComplete = false;
6106    bool skipCall = false;
6107    loader_platform_thread_lock_mutex(&globalLock);
6108#if MTMERGE
6109    // Verify that command buffers in pool are complete (not in-flight)
6110    // MTMTODO : Merge this with code below (separate *NotInUse() call)
6111    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6112         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
        commandBufferComplete = false;
        skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
6115        if (VK_FALSE == commandBufferComplete) {
6116            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6117                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6118                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
6119                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
6120                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
6121        }
6122    }
#endif
    // Verify that none of this pool's command buffers are in flight. This must
    // run before the pool's bookkeeping is erased below, since the check walks
    // commandPoolMap and would pass vacuously against an erased entry.
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool)) {
        loader_platform_thread_unlock_mutex(&globalLock);
        return;
    }
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandPoolMap
    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
            delete (*del_cb).second;                  // delete CB info structure
            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
                poolCb); // Remove CB reference from commandPoolMap's list
        }
    }
    dev_data->commandPoolMap.erase(commandPool);

    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
}
6156
6157VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6158vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6159    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6160    bool commandBufferComplete = false;
6161    bool skipCall = false;
6162    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6163#if MTMERGE
6164    // MTMTODO : Merge this with *NotInUse() call below
6165    loader_platform_thread_lock_mutex(&globalLock);
6166    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6167    // Verify that CB's in pool are complete (not in-flight)
6168    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
        skipCall |= checkCBCompleted(dev_data, (*it), &commandBufferComplete);
        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Resetting CB %p before it has completed. You must check CB "
                                "completion flag before calling vkResetCommandPool().",
                                (*it));
6176        } else {
6177            // Clear memory references at this point.
6178            clear_cmd_buf_and_mem_references(dev_data, (*it));
6179        }
6180        ++it;
6181    }
6182    loader_platform_thread_unlock_mutex(&globalLock);
6183#endif
6184    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
6185        return VK_ERROR_VALIDATION_FAILED_EXT;
6186
6187    if (!skipCall)
6188        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
6189
6190    // Reset all of the CBs allocated from this pool
6191    if (VK_SUCCESS == result) {
6192        loader_platform_thread_lock_mutex(&globalLock);
6193        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6194        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6195            resetCB(dev_data, (*it));
6196            ++it;
6197        }
6198        loader_platform_thread_unlock_mutex(&globalLock);
6199    }
6200    return result;
6201}
6202
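// vkResetFences: a fence that is still in flight must not be reset, and
// resetting a fence that is already unsignaled is flagged as a warning since it
// usually indicates an application logic error.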
6203VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6204    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6205    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6206    bool skipCall = false;
6207    loader_platform_thread_lock_mutex(&globalLock);
6208    for (uint32_t i = 0; i < fenceCount; ++i) {
6209#if MTMERGE
6210        // Reset fence state in fenceCreateInfo structure
6211        // MTMTODO : Merge with code below
6212        auto fence_item = dev_data->fenceMap.find(pFences[i]);
6213        if (fence_item != dev_data->fenceMap.end()) {
6214            // Validate fences in SIGNALED state
6215            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
6216                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "Fence %#" PRIxLEAST64 " submitted to vkResetFences() in UNSIGNALED STATE", (uint64_t)pFences[i]);
6220            } else {
6221                fence_item->second.createInfo.flags =
6222                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
6223            }
6224        }
6225#endif
6226        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
6227            skipCall |=
6228                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6229                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6230                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
6231        }
6232    }
6233    loader_platform_thread_unlock_mutex(&globalLock);
6234    if (!skipCall)
6235        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6236    return result;
6237}
6238
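// Mark any command buffers that reference this framebuffer as invalid, drop the layer's
// tracking state for it, then destroy the framebuffer.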
6239VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6240vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6241    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6242#if MTMERGE
6243    // MTMTODO : Merge with code below
6244    loader_platform_thread_lock_mutex(&globalLock);
6245    auto item = dev_data->fbMap.find(framebuffer);
6246    if (item != dev_data->fbMap.end()) {
6247        dev_data->fbMap.erase(framebuffer);
6248    }
6249    loader_platform_thread_unlock_mutex(&globalLock);
6250#endif
    // Hold the lock across the lookup, CB invalidation, and erase so the maps are not mutated underneath us
    loader_platform_thread_lock_mutex(&globalLock);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
            }
        }
        dev_data->frameBufferMap.erase(framebuffer);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
6267    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6268}
6269
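// Destroy the render pass and remove it from the layer's tracking maps.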
6270VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6271vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6272    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6273    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6274    loader_platform_thread_lock_mutex(&globalLock);
6275    dev_data->renderPassMap.erase(renderPass);
6276    dev_data->passMap.erase(renderPass);
6277    loader_platform_thread_unlock_mutex(&globalLock);
6278}
6279
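// Flag an error for any queue family index that is out of range for this physical device.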
6280VkBool32 validate_queue_family_indices(layer_data *dev_data, const char *function_name, const uint32_t count,
6281                                       const uint32_t *indices) {
6282    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < count; i++) {
6284        if (indices[i] >= dev_data->physDevProperties.queue_family_properties.size()) {
6285            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6286                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                "%s has a QueueFamilyIndex that is greater than or equal to the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER
                                ") for this device.",
6289                                function_name, dev_data->physDevProperties.queue_family_properties.size());
6290        }
6291    }
6292    return skipCall;
6293}
6294
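// Validate the requested queue family indices, create the buffer, then shadow its create info
// for later validation.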
6295VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6296vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6297    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6298    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6299    bool skipCall = validate_queue_family_indices(dev_data, "vkCreateBuffer", pCreateInfo->queueFamilyIndexCount,
6300                                                  pCreateInfo->pQueueFamilyIndices);
6301    if (!skipCall) {
6302        result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6303    }
6304
6305    if (VK_SUCCESS == result) {
6306        loader_platform_thread_lock_mutex(&globalLock);
6307#if MTMERGE
6308        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
6309#endif
6310        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6311        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
6312        dev_data->bufferMap[*pBuffer].in_use.store(0);
6313        loader_platform_thread_unlock_mutex(&globalLock);
6314    }
6315    return result;
6316}
6317
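// Create the buffer view, shadow its create info, and verify the source buffer's usage flags.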
6318VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6319                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6320    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6321    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6322    if (VK_SUCCESS == result) {
6323        loader_platform_thread_lock_mutex(&globalLock);
6324        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
6325#if MTMERGE
6326        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6327        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6328        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
6329                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
6330                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6331#endif
6332        loader_platform_thread_unlock_mutex(&globalLock);
6333    }
6334    return result;
6335}
6336
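// Validate the requested queue family indices, create the image, then record its create info
// and initial layout for later validation.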
6337VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6338vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6339    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6340    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6341    bool skipCall = validate_queue_family_indices(dev_data, "vkCreateImage", pCreateInfo->queueFamilyIndexCount,
6342                                                  pCreateInfo->pQueueFamilyIndices);
6343    if (!skipCall) {
6344        result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6345    }
6346
6347    if (VK_SUCCESS == result) {
6348        loader_platform_thread_lock_mutex(&globalLock);
6349#if MTMERGE
6350        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
6351#endif
6352        IMAGE_LAYOUT_NODE image_node;
6353        image_node.layout = pCreateInfo->initialLayout;
6354        image_node.format = pCreateInfo->format;
6355        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
6356        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6357        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6358        dev_data->imageLayoutMap[subpair] = image_node;
6359        loader_platform_thread_unlock_mutex(&globalLock);
6360    }
6361    return result;
6362}
6363
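// Resolve the special values VK_REMAINING_MIP_LEVELS and VK_REMAINING_ARRAY_LAYERS in the
// given subresource range to the image's actual counts, in place.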
6364static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6365    /* expects globalLock to be held by caller */
6366
6367    auto image_node_it = dev_data->imageMap.find(image);
6368    if (image_node_it != dev_data->imageMap.end()) {
6369        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6370         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6371         * the actual values.
6372         */
6373        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6374            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
6375        }
6376
6377        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6378            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
6379        }
6380    }
6381}
6382
6383// Return the correct layer/level counts if the caller used the special
6384// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6385static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6386                                         VkImage image) {
6387    /* expects globalLock to be held by caller */
6388
6389    *levels = range.levelCount;
6390    *layers = range.layerCount;
6391    auto image_node_it = dev_data->imageMap.find(image);
6392    if (image_node_it != dev_data->imageMap.end()) {
6393        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6394            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
6395        }
6396        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6397            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
6398        }
6399    }
6400}
6401
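// Create the image view, shadow a create info with resolved level/layer counts, and verify
// the underlying image's usage flags.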
6402VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6403                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6404    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6405    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6406    if (VK_SUCCESS == result) {
6407        loader_platform_thread_lock_mutex(&globalLock);
6408        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
6409        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
6410        dev_data->imageViewMap[*pView] = localCI;
6411#if MTMERGE
6412        // Validate that img has correct usage flags set
6413        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
6414                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
6415                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                   VK_FALSE, "vkCreateImageView()",
                                   "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
6417#endif
6418        loader_platform_thread_unlock_mutex(&globalLock);
6419    }
6420    return result;
6421}
6422
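// Create the fence and initialize the layer's tracking state for it.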
6423VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6424vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6425    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6426    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6427    if (VK_SUCCESS == result) {
6428        loader_platform_thread_lock_mutex(&globalLock);
6429        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
6430#if MTMERGE
        // Zero the MT_FENCE_INFO-sized portion merged into FENCE_NODE (MTMERGE transitional code); in_use is re-initialized below
        memset(pFN, 0, sizeof(MT_FENCE_INFO));
6432        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
6433        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
6434            pFN->firstTimeFlag = VK_TRUE;
6435        }
6436#endif
6437        pFN->in_use.store(0);
6438        loader_platform_thread_unlock_mutex(&globalLock);
6439    }
6440    return result;
6441}
6442
6443// TODO handle pipeline caches
6444VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6445                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6446    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6447    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6448    return result;
6449}
6450
6451VKAPI_ATTR void VKAPI_CALL
6452vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6453    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6454    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6455}
6456
6457VKAPI_ATTR VkResult VKAPI_CALL
6458vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6459    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6460    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6461    return result;
6462}
6463
6464VKAPI_ATTR VkResult VKAPI_CALL
6465vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6466    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6467    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6468    return result;
6469}
6470
6471VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6472vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6473                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6474                          VkPipeline *pPipelines) {
6475    VkResult result = VK_SUCCESS;
6476    // TODO What to do with pipelineCache?
6477    // The order of operations here is a little convoluted but gets the job done
6478    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6479    //  2. Create state is then validated (which uses flags setup during shadowing)
6480    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6481    VkBool32 skipCall = VK_FALSE;
6482    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6483    vector<PIPELINE_NODE *> pPipeNode(count);
6484    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6485
6486    uint32_t i = 0;
6487    loader_platform_thread_lock_mutex(&globalLock);
6488
6489    for (i = 0; i < count; i++) {
6490        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
6491        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6492    }
6493
6494    if (VK_FALSE == skipCall) {
6495        loader_platform_thread_unlock_mutex(&globalLock);
6496        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6497                                                                          pPipelines);
6498        loader_platform_thread_lock_mutex(&globalLock);
6499        for (i = 0; i < count; i++) {
6500            pPipeNode[i]->pipeline = pPipelines[i];
6501            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6502        }
6503        loader_platform_thread_unlock_mutex(&globalLock);
6504    } else {
6505        for (i = 0; i < count; i++) {
6506            delete pPipeNode[i];
6507        }
6508        loader_platform_thread_unlock_mutex(&globalLock);
6509        return VK_ERROR_VALIDATION_FAILED_EXT;
6510    }
6511    return result;
6512}
6513
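// Compute analogue of vkCreateGraphicsPipelines() above: shadow the create state, then create
// and track the pipelines. Compute-specific create-state validation is still TODO.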
6514VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6515vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6516                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6517                         VkPipeline *pPipelines) {
6518    VkResult result = VK_SUCCESS;
6519    VkBool32 skipCall = VK_FALSE;
6520
6521    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6522    vector<PIPELINE_NODE *> pPipeNode(count);
6523    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6524
6525    uint32_t i = 0;
6526    loader_platform_thread_lock_mutex(&globalLock);
6527    for (i = 0; i < count; i++) {
6528        // TODO: Verify compute stage bits
6529
6530        // Create and initialize internal tracking data structure
6531        pPipeNode[i] = new PIPELINE_NODE;
        // TODO : This is a shallow copy; pointer members of the create info are not deep-copied
        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6533
6534        // TODO: Add Compute Pipeline Verification
6535        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6536    }
6537
6538    if (VK_FALSE == skipCall) {
6539        loader_platform_thread_unlock_mutex(&globalLock);
6540        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6541                                                                         pPipelines);
6542        loader_platform_thread_lock_mutex(&globalLock);
6543        for (i = 0; i < count; i++) {
6544            pPipeNode[i]->pipeline = pPipelines[i];
6545            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6546        }
6547        loader_platform_thread_unlock_mutex(&globalLock);
6548    } else {
6549        for (i = 0; i < count; i++) {
6550            // Clean up any locally allocated data structures
6551            delete pPipeNode[i];
6552        }
6553        loader_platform_thread_unlock_mutex(&globalLock);
6554        return VK_ERROR_VALIDATION_FAILED_EXT;
6555    }
6556    return result;
6557}
6558
6559VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6560                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6561    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6562    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6563    if (VK_SUCCESS == result) {
6564        loader_platform_thread_lock_mutex(&globalLock);
6565        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6566        loader_platform_thread_unlock_mutex(&globalLock);
6567    }
6568    return result;
6569}
6570
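// Deep-copy the layout's bindings (including immutable samplers) into a LAYOUT_NODE and
// record per-descriptor type and stage flags for later update/bind validation.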
6571VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6572vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6573                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6574    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6575    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6576    if (VK_SUCCESS == result) {
6577        // TODOSC : Capture layout bindings set
6578        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
6579        if (NULL == pNewNode) {
6580            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
6581                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6582                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
6583                return VK_ERROR_VALIDATION_FAILED_EXT;
6584        }
6585        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
6586        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
6587        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
6588               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
6589        // g++ does not like reserve with size 0
6590        if (pCreateInfo->bindingCount)
6591            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
6592        uint32_t totalCount = 0;
6593        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6594            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
6595                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6596                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
6597                            DRAWSTATE_INVALID_LAYOUT, "DS", "duplicated binding number in "
6598                                                            "VkDescriptorSetLayoutBinding"))
6599                    return VK_ERROR_VALIDATION_FAILED_EXT;
            } // on success, emplace has already recorded the binding-to-index mapping
6603            totalCount += pCreateInfo->pBindings[i].descriptorCount;
6604            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
6605                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
6606                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
6607                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
6608                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
6609            }
6610        }
6611        pNewNode->layout = *pSetLayout;
6612        pNewNode->startIndex = 0;
6613        if (totalCount > 0) {
6614            pNewNode->descriptorTypes.resize(totalCount);
6615            pNewNode->stageFlags.resize(totalCount);
6616            uint32_t offset = 0;
6617            uint32_t j = 0;
6618            VkDescriptorType dType;
6619            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6620                dType = pCreateInfo->pBindings[i].descriptorType;
6621                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
6622                    pNewNode->descriptorTypes[offset + j] = dType;
6623                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
6624                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
6625                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
6626                        pNewNode->dynamicDescriptorCount++;
6627                    }
6628                }
6629                offset += j;
6630            }
6631            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
6632        } else { // no descriptors
6633            pNewNode->endIndex = 0;
6634        }
        // Add the new layout node to the global descriptor set layout map
6636        loader_platform_thread_lock_mutex(&globalLock);
6637        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
6638        loader_platform_thread_unlock_mutex(&globalLock);
6639    }
6640    return result;
6641}
6642
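// Flag an error if a push constant range (offset + size) exceeds the device's
// maxPushConstantsSize limit.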
6643static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6644                                     const char *caller_name) {
6645    bool skipCall = false;
6646    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
6647        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6648                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                 "exceeds this device's maxPushConstantsSize of %u.",
6650                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
6651    }
6652    return skipCall;
6653}
6654
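// Validate the push constant ranges (size, alignment, device limit), then create the layout
// and shadow its set layouts and push constant ranges.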
6655VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6656                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6657    bool skipCall = false;
6658    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6659    uint32_t i = 0;
6660    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6661        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6662                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
6663        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
6664            skipCall |=
6665                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6666                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
6667                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6668                        i, pCreateInfo->pPushConstantRanges[i].size);
6669        }
6670        // TODO : Add warning if ranges overlap
6671    }
6672    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6673    if (VK_SUCCESS == result) {
6674        loader_platform_thread_lock_mutex(&globalLock);
6675        // TODOSC : Merge capture of the setLayouts per pipeline
6676        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6677        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
6678        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6679            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
6680        }
6681        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
6682        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6683            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
6684        }
6685        loader_platform_thread_unlock_mutex(&globalLock);
6686    }
6687    return result;
6688}
6689
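// Create the descriptor pool and add a tracking node for it.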
6690VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6691vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6692                       VkDescriptorPool *pDescriptorPool) {
6693    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6694    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6695    if (VK_SUCCESS == result) {
        // Log the new pool, then add a tracking node for it
6697        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
6699                    (uint64_t)*pDescriptorPool))
6700            return VK_ERROR_VALIDATION_FAILED_EXT;
6701        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6702        if (NULL == pNewNode) {
6703            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6704                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6705                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6706                return VK_ERROR_VALIDATION_FAILED_EXT;
6707        } else {
6708            loader_platform_thread_lock_mutex(&globalLock);
6709            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6710            loader_platform_thread_unlock_mutex(&globalLock);
6711        }
6712    } else {
        // TODO : Is any clean-up needed if pool creation fails?
6714    }
6715    return result;
6716}
6717
6718VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6719vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6720    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6721    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6722    if (VK_SUCCESS == result) {
6723        loader_platform_thread_lock_mutex(&globalLock);
6724        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6725        loader_platform_thread_unlock_mutex(&globalLock);
6726    }
6727    return result;
6728}
6729
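// Verify that the pool has enough available descriptors for the requested layouts, allocate
// the sets, then build SET_NODE tracking state for each new set.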
6730VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6731vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6732    VkBool32 skipCall = VK_FALSE;
6733    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6734
6735    loader_platform_thread_lock_mutex(&globalLock);
6736    // Verify that requested descriptorSets are available in pool
6737    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6738    if (!pPoolNode) {
6739        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6740                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
6741                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
6742                            (uint64_t)pAllocateInfo->descriptorPool);
6743    } else { // Make sure pool has all the available descriptors before calling down chain
6744        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
6745                                                             pAllocateInfo->pSetLayouts);
6746    }
6747    loader_platform_thread_unlock_mutex(&globalLock);
6748    if (skipCall)
6749        return VK_ERROR_VALIDATION_FAILED_EXT;
6750    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6751    if (VK_SUCCESS == result) {
6752        loader_platform_thread_lock_mutex(&globalLock);
6753        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6754        if (pPoolNode) {
6755            if (pAllocateInfo->descriptorSetCount == 0) {
6756                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6757                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
6758                        "AllocateDescriptorSets called with 0 count");
6759            }
6760            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
6761                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6762                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
6763                        (uint64_t)pDescriptorSets[i]);
6764                // Create new set node and add to head of pool nodes
6765                SET_NODE *pNewNode = new SET_NODE;
6766                if (NULL == pNewNode) {
6767                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6768                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6769                                DRAWSTATE_OUT_OF_MEMORY, "DS",
6770                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()"))
6771                        return VK_ERROR_VALIDATION_FAILED_EXT;
6772                } else {
6773                    // TODO : Pool should store a total count of each type of Descriptor available
6774                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. On reset/free, bump the count back up.
6776                    // Insert set at head of Set LL for this pool
6777                    pNewNode->pNext = pPoolNode->pSets;
6778                    pNewNode->in_use.store(0);
6779                    pPoolNode->pSets = pNewNode;
6780                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
6781                    if (NULL == pLayout) {
6782                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6783                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
6784                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
6785                                    "Unable to find set layout node for layout %#" PRIxLEAST64
6786                                    " specified in vkAllocateDescriptorSets() call",
6787                                    (uint64_t)pAllocateInfo->pSetLayouts[i]))
6788                            return VK_ERROR_VALIDATION_FAILED_EXT;
6789                    }
6790                    pNewNode->pLayout = pLayout;
6791                    pNewNode->pool = pAllocateInfo->descriptorPool;
6792                    pNewNode->set = pDescriptorSets[i];
6793                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
6794                    if (pNewNode->descriptorCount) {
                        size_t descriptorArraySize = sizeof(GENERIC_HEADER *) * pNewNode->descriptorCount;
                        // Allocate one pointer per descriptor; descriptorArraySize is a byte count, not an element count
                        pNewNode->ppDescriptors = new GENERIC_HEADER *[pNewNode->descriptorCount];
                        memset(pNewNode->ppDescriptors, 0, descriptorArraySize);
6798                    }
6799                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
6800                }
6801            }
6802        }
6803        loader_platform_thread_unlock_mutex(&globalLock);
6804    }
6805    return result;
6806}
6807
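// Verify the sets are idle and that the pool allows individual frees, free the sets, then
// return their descriptors to the pool's available counts.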
6808VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6809vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6810    VkBool32 skipCall = VK_FALSE;
6811    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6812    // Make sure that no sets being destroyed are in-flight
6813    loader_platform_thread_lock_mutex(&globalLock);
6814    for (uint32_t i = 0; i < count; ++i)
        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
6816    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6817    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6818        // Can't Free from a NON_FREE pool
6819        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6820                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6821                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6822                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6823    }
6824    loader_platform_thread_unlock_mutex(&globalLock);
6825    if (VK_FALSE != skipCall)
6826        return VK_ERROR_VALIDATION_FAILED_EXT;
6827    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6828    if (VK_SUCCESS == result) {
6829        // For each freed descriptor add it back into the pool as available
6830        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; pPoolNode && (i < count); ++i) { // skip bookkeeping if the pool is unknown to the layer
6832            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
6833            invalidateBoundCmdBuffers(dev_data, pSet);
6834            LAYOUT_NODE *pLayout = pSet->pLayout;
6835            uint32_t typeIndex = 0, poolSizeCount = 0;
6836            for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
6837                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
6838                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
6839                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
6840            }
6841        }
6842        loader_platform_thread_unlock_mutex(&globalLock);
6843    }
6844    // TODO : Any other clean-up or book-keeping to do here?
6845    return result;
6846}
6847
6848VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6849vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6850                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6851    // dsUpdate will return VK_TRUE only if a bailout error occurs, so we want to call down tree when update returns VK_FALSE
6852    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6853    loader_platform_thread_lock_mutex(&globalLock);
6854#if MTMERGE
6855    // MTMTODO : Merge this in with existing update code below and handle descriptor copies case
6856    uint32_t j = 0;
6857    for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
6858        if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
6859            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6860                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].images.push_back(
6861                    pDescriptorWrites[i].pImageInfo[j].imageView);
6862            }
6863        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
6864            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6865                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6866                    dev_data->bufferViewMap[pDescriptorWrites[i].pTexelBufferView[j]].buffer);
6867            }
6868        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
6869                   pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6870            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6871                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6872                    pDescriptorWrites[i].pBufferInfo[j].buffer);
6873            }
6874        }
6875    }
6876#endif
6877    VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6878    loader_platform_thread_unlock_mutex(&globalLock);
6879    if (!rtn) {
6880        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6881                                                              pDescriptorCopies);
6882    }
6883}
6884
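// Allocate the command buffers, register each one with its command pool, and create a
// per-CB tracking node in the reset state.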
6885VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6886vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6887    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6888    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6889    if (VK_SUCCESS == result) {
6890        loader_platform_thread_lock_mutex(&globalLock);
6891        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6892        if (cp_it != dev_data->commandPoolMap.end()) {
6893            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6894                // Add command buffer to its commandPool map
6895                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6896                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6897                // Add command buffer to map
6898                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6899                resetCB(dev_data, pCommandBuffer[i]);
6900                pCB->createInfo = *pCreateInfo;
6901                pCB->device = device;
6902            }
6903        }
6904#if MTMERGE
6905        printCBList(dev_data, device);
6906#endif
6907        loader_platform_thread_unlock_mutex(&globalLock);
6908    }
6909    return result;
6910}
6911
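// Validate begin-time state (inheritance info for secondary CBs, implicit-reset rules for
// previously recorded CBs), then move the CB into the RECORDING state.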
6912VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6913vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6914    VkBool32 skipCall = VK_FALSE;
6915    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6916    loader_platform_thread_lock_mutex(&globalLock);
6917    // Validate command buffer level
6918    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6919    if (pCB) {
6920#if MTMERGE
6921        bool commandBufferComplete = false;
6922        // MTMTODO : Merge this with code below
6923        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6924        skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
6925
6926        if (!commandBufferComplete) {
6927            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6928                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6929                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
6930                                "You must check CB flag before this call.",
6931                                commandBuffer);
6932        }
6933#endif
6934        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6935            // Secondary Command Buffer
6936            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6937            if (!pInfo) {
6938                skipCall |=
6939                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6940                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6941                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
6942                            reinterpret_cast<void *>(commandBuffer));
6943            } else {
6944                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderPass must NOT be null for a Secondary CB
6946                        skipCall |= log_msg(
6947                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6948                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6949                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
6950                            reinterpret_cast<void *>(commandBuffer));
6951                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6953                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6954                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6955                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6956                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
6957                                                  "valid framebuffer parameter is specified.",
6958                                            reinterpret_cast<void *>(commandBuffer));
6959                    } else {
6960                        string errorString = "";
6961                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
6962                        if (fbNode != dev_data->frameBufferMap.end()) {
6963                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
6964                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
                                // The renderPass the framebuffer was created with must be
                                // compatible with the local renderPass
6968                                skipCall |=
6969                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6970                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6971                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6972                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6973                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
6974                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
6975                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6976                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6977                            }
6978                            // Connect this framebuffer to this cmdBuffer
6979                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
6980                        }
6981                    }
6982                }
6983                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6984                     dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
6985                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6986                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6987                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6988                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6989                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
6991                                        "support precise occlusion queries.",
6992                                        reinterpret_cast<void *>(commandBuffer));
6993                }
6994            }
6995            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6996                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
6997                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
6998                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
6999                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7000                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7001                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have a subpass index (%d) "
7003                                            "that is less than the number of subpasses (%d).",
7004                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
7005                    }
7006                }
7007            }
7008        }
7009        if (CB_RECORDING == pCB->state) {
7010            skipCall |=
7011                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7012                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7013                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
7014                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7015                        (uint64_t)commandBuffer);
7016        } else if (CB_RECORDED == pCB->state) {
7017            VkCommandPool cmdPool = pCB->createInfo.commandPool;
7018            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7019                skipCall |=
7020                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7021                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7022                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
7023                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
7024                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7025                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7026            }
7027            resetCB(dev_data, commandBuffer);
7028        }
7029        // Set updated state here in case implicit reset occurs above
7030        pCB->state = CB_RECORDING;
7031        pCB->beginInfo = *pBeginInfo;
7032        if (pCB->beginInfo.pInheritanceInfo) {
7033            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
7034            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
7035        }
7036    } else {
7037        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7038                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "In vkBeginCommandBuffer(): unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
7040    }
7041    loader_platform_thread_unlock_mutex(&globalLock);
7042    if (VK_FALSE != skipCall) {
7043        return VK_ERROR_VALIDATION_FAILED_EXT;
7044    }
7045    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
7046#if MTMERGE
7047    loader_platform_thread_lock_mutex(&globalLock);
7048    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7049    loader_platform_thread_unlock_mutex(&globalLock);
7050#endif
7051    return result;
7052}
7053
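// Verify the CB is in the RECORDING state with no queries still active, then mark it RECORDED.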
7054VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
7055    VkBool32 skipCall = VK_FALSE;
7056    VkResult result = VK_SUCCESS;
7057    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7058    loader_platform_thread_lock_mutex(&globalLock);
7059    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7060    if (pCB) {
7061        if (pCB->state != CB_RECORDING) {
7062            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
7063        }
7064        for (auto query : pCB->activeQueries) {
7065            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7066                                DRAWSTATE_INVALID_QUERY, "DS",
7067                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
7068                                (uint64_t)(query.pool), query.index);
7069        }
7070    }
7071    if (VK_FALSE == skipCall) {
7072        loader_platform_thread_unlock_mutex(&globalLock);
7073        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
7074        loader_platform_thread_lock_mutex(&globalLock);
        if ((VK_SUCCESS == result) && pCB) { // pCB may be null if the CB was never tracked
7076            pCB->state = CB_RECORDED;
7077            // Reset CB status flags
7078            pCB->status = 0;
7079            printCB(dev_data, commandBuffer);
7080        }
7081    } else {
7082        result = VK_ERROR_VALIDATION_FAILED_EXT;
7083    }
7084    loader_platform_thread_unlock_mutex(&globalLock);
7085    return result;
7086}
7087
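// Verify the CB is not in flight and that its pool was created with the RESET_COMMAND_BUFFER
// bit, then reset the layer's state for the CB.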
7088VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7089vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7090    VkBool32 skipCall = VK_FALSE;
7091    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7092    loader_platform_thread_lock_mutex(&globalLock);
7093#if MTMERGE
7094    bool commandBufferComplete = false;
7095    // Verify that CB is complete (not in-flight)
7096    skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7097    if (!commandBufferComplete) {
7098        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7099                            (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7100                            "Resetting CB %p before it has completed. You must check CB "
7101                            "flag before calling vkResetCommandBuffer().",
7102                            commandBuffer);
7103    }
    // Clear memory references at this point.
7105    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7106#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) { // guard against unknown command buffers
        VkCommandPool cmdPool = pCB->createInfo.commandPool;
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                                "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
                                ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                                (uint64_t)commandBuffer, (uint64_t)cmdPool);
        }
    }
7116    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7117        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7118                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7119                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
7120                            reinterpret_cast<uint64_t>(commandBuffer));
7121    }
7122    loader_platform_thread_unlock_mutex(&globalLock);
7123    if (skipCall != VK_FALSE)
7124        return VK_ERROR_VALIDATION_FAILED_EXT;
7125    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
7126    if (VK_SUCCESS == result) {
7127        loader_platform_thread_lock_mutex(&globalLock);
7128        resetCB(dev_data, commandBuffer);
7129        loader_platform_thread_unlock_mutex(&globalLock);
7130    }
7131    return result;
7132}
7133#if MTMERGE
7134// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
7135//    need to account for that mem now having binding to given commandBuffer
7136#endif
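// vkCmdBindPipeline() validation: records CMD_BINDPIPELINE, rejects binding a
// compute pipeline while a render pass is active, tracks the last-bound pipeline
// per bind point, and validates the pipeline state against the command buffer.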
7137VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7138vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7139    VkBool32 skipCall = VK_FALSE;
7140    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7141    loader_platform_thread_lock_mutex(&globalLock);
7142    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7143    if (pCB) {
7144        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7145        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7146            skipCall |=
7147                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7148                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7149                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
7150                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
7151        }
7152
7153        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7154        if (pPN) {
7155            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
7156            set_cb_pso_status(pCB, pPN);
7157            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7158        } else {
7159            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7160                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7161                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7162        }
7163    }
7164    loader_platform_thread_unlock_mutex(&globalLock);
7165    if (VK_FALSE == skipCall)
7166        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7167}
7168
7169VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7170vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7171    VkBool32 skipCall = VK_FALSE;
7172    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7173    loader_platform_thread_lock_mutex(&globalLock);
7174    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7175    if (pCB) {
7176        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7177        pCB->status |= CBSTATUS_VIEWPORT_SET;
7178        pCB->viewports.resize(std::max<size_t>(pCB->viewports.size(), firstViewport + viewportCount));
7179        memcpy(pCB->viewports.data() + firstViewport, pViewports, viewportCount * sizeof(VkViewport));
7180    }
7181    loader_platform_thread_unlock_mutex(&globalLock);
7182    if (VK_FALSE == skipCall)
7183        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7184}
7185
7186VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7187vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7188    VkBool32 skipCall = VK_FALSE;
7189    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7190    loader_platform_thread_lock_mutex(&globalLock);
7191    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7192    if (pCB) {
7193        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7194        pCB->status |= CBSTATUS_SCISSOR_SET;
7195        pCB->scissors.resize(std::max<size_t>(pCB->scissors.size(), firstScissor + scissorCount));
7196        memcpy(pCB->scissors.data() + firstScissor, pScissors, scissorCount * sizeof(VkRect2D));
7197    }
7198    loader_platform_thread_unlock_mutex(&globalLock);
7199    if (VK_FALSE == skipCall)
7200        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7201}
7202
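// The dynamic-state setters below all follow one pattern: record the command in
// the CB, set the corresponding CBSTATUS_* bit so draw-time validation knows the
// state was supplied dynamically, then forward the call down the chain.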
7203VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7204    VkBool32 skipCall = VK_FALSE;
7205    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7206    loader_platform_thread_lock_mutex(&globalLock);
7207    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7208    if (pCB) {
7209        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7210        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7211    }
7212    loader_platform_thread_unlock_mutex(&globalLock);
7213    if (VK_FALSE == skipCall)
7214        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7215}
7216
7217VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7218vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7219    VkBool32 skipCall = VK_FALSE;
7220    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7221    loader_platform_thread_lock_mutex(&globalLock);
7222    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7223    if (pCB) {
7224        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7225        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7226    }
7227    loader_platform_thread_unlock_mutex(&globalLock);
7228    if (VK_FALSE == skipCall)
7229        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7230                                                         depthBiasSlopeFactor);
7231}
7232
7233VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7234    VkBool32 skipCall = VK_FALSE;
7235    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7236    loader_platform_thread_lock_mutex(&globalLock);
7237    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7238    if (pCB) {
7239        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7240        pCB->status |= CBSTATUS_BLEND_SET;
7241    }
7242    loader_platform_thread_unlock_mutex(&globalLock);
7243    if (VK_FALSE == skipCall)
7244        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7245}
7246
7247VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7248vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7249    VkBool32 skipCall = VK_FALSE;
7250    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7251    loader_platform_thread_lock_mutex(&globalLock);
7252    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7253    if (pCB) {
7254        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7255        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7256    }
7257    loader_platform_thread_unlock_mutex(&globalLock);
7258    if (VK_FALSE == skipCall)
7259        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7260}
7261
7262VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7263vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7264    VkBool32 skipCall = VK_FALSE;
7265    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7266    loader_platform_thread_lock_mutex(&globalLock);
7267    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7268    if (pCB) {
7269        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7270        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7271    }
7272    loader_platform_thread_unlock_mutex(&globalLock);
7273    if (VK_FALSE == skipCall)
7274        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7275}
7276
7277VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7278vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7279    VkBool32 skipCall = VK_FALSE;
7280    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7281    loader_platform_thread_lock_mutex(&globalLock);
7282    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7283    if (pCB) {
7284        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7285        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7286    }
7287    loader_platform_thread_unlock_mutex(&globalLock);
7288    if (VK_FALSE == skipCall)
7289        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7290}
7291
7292VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7293vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7294    VkBool32 skipCall = VK_FALSE;
7295    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7296    loader_platform_thread_lock_mutex(&globalLock);
7297    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7298    if (pCB) {
7299        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7300        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7301    }
7302    loader_platform_thread_unlock_mutex(&globalLock);
7303    if (VK_FALSE == skipCall)
7304        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7305}
7306
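// vkCmdBindDescriptorSets() validation: requires the CB to be recording, tracks
// the newly bound sets per bind point, warns on sets that were never updated,
// checks each set's layout compatibility with the pipeline layout, invalidates
// previously bound sets disturbed by this bind, and validates dynamic offsets.
// Each dynamic uniform/storage descriptor consumes one pDynamicOffsets entry,
// which must be a multiple of minUniformBufferOffsetAlignment or
// minStorageBufferOffsetAlignment respectively. For example, with an alignment
// limit of 0x100, an offset of 0x100 passes and 0x80 fails the vk_safe_modulo()
// check; binding a set with one dynamic UBO plus a set with two dynamic SSBOs
// requires dynamicOffsetCount to be exactly 3.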
7307VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7308vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7309                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7310                        const uint32_t *pDynamicOffsets) {
7311    VkBool32 skipCall = VK_FALSE;
7312    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7313    loader_platform_thread_lock_mutex(&globalLock);
7314#if MTMERGE
7315    // MTMTODO : Merge this with code below
7316    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7317    if (cb_data != dev_data->commandBufferMap.end()) {
7318        // MTMTODO : activeDescriptorSets should be merged with lastBound.boundDescriptorSets
7319        std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
7320        if (activeDescriptorSets.size() < (setCount + firstSet)) {
7321            activeDescriptorSets.resize(setCount + firstSet);
7322        }
7323        for (uint32_t i = 0; i < setCount; ++i) {
7324            activeDescriptorSets[i + firstSet] = pDescriptorSets[i];
7325        }
7326    }
7327    // TODO : Somewhere need to verify that all textures referenced by shaders in DS are in some type of *SHADER_READ* state
7328#endif
7329    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7330    if (pCB) {
7331        if (pCB->state == CB_RECORDING) {
7332            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7333            uint32_t totalDynamicDescriptors = 0;
7334            string errorString = "";
7335            uint32_t lastSetIndex = firstSet + setCount - 1;
7336            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
7337                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7338            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7339            for (uint32_t i = 0; i < setCount; i++) {
7340                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7341                if (pSet) {
7342                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
7343                    pSet->boundCmdBuffers.insert(commandBuffer);
7344                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
7345                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7346                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7347                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7348                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7349                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7350                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7351                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7352                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7353                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7354                                            "DS %#" PRIxLEAST64
7355                                            " bound but it was never updated. You may want to either update it or not bind it.",
7356                                            (uint64_t)pDescriptorSets[i]);
7357                    }
7358                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7359                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7360                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7361                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7362                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7363                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
7364                                            "pipelineLayout due to: %s",
7365                                            i + firstSet, errorString.c_str());
7366                    }
7367                    if (pSet->pLayout->dynamicDescriptorCount) {
7368                        // First make sure we won't overstep bounds of pDynamicOffsets array
7369                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
7370                            skipCall |=
7371                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7372                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7373                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7374                                        "descriptorSet #%u (%#" PRIxLEAST64
7375                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7376                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7377                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
7378                                        (dynamicOffsetCount - totalDynamicDescriptors));
7379                        } else { // Validate and store dynamic offsets with the set
7380                            // Validate Dynamic Offset Minimums
7381                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7382                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7383                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7384                                    if (vk_safe_modulo(
7385                                            pDynamicOffsets[cur_dyn_offset],
7386                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
7387                                        0) {
7388                                        skipCall |= log_msg(
7389                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7390                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7391                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7392                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7393                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7394                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7395                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
7396                                    }
7397                                    cur_dyn_offset++;
7398                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7399                                    if (vk_safe_modulo(
7400                                            pDynamicOffsets[cur_dyn_offset],
7401                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
7402                                        0) {
7403                                        skipCall |= log_msg(
7404                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7405                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7406                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7407                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7408                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7409                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7410                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
7411                                    }
7412                                    cur_dyn_offset++;
7413                                }
7414                            }
7415                            // Keep running total of dynamic descriptor count to verify at the end
7416                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
7417                        }
7418                    }
7419                } else {
7420                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7421                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7422                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7423                                        (uint64_t)pDescriptorSets[i]);
7424                }
7425            }
7426            skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7427            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7428            if (firstSet > 0) { // Check set #s below the first bound set
7429                for (uint32_t i = 0; i < firstSet; ++i) {
7430                    if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7431                        !verify_set_layout_compatibility(
7432                            dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7433                            errorString)) {
7434                        skipCall |= log_msg(
7435                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7436                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7437                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7438                            "DescriptorSet %#" PRIxLEAST64
7439                            " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7440                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7441                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7442                    }
7443                }
7444            }
7445            // Check if newly last bound set invalidates any remaining bound sets
7446            if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7447                if (oldFinalBoundSet &&
7448                    !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7449                                                     errorString)) {
7450                    skipCall |=
7451                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7452                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7453                                DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64
7454                                                      " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7455                                                      " newly bound as set #%u so set #%u and any subsequent sets were "
7456                                                      "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7457                                (uint64_t)oldFinalBoundSet, lastSetIndex,
7458                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7459                                lastSetIndex + 1, (uint64_t)layout);
7460                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7461                }
7462            }
7463            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7464            if (totalDynamicDescriptors != dynamicOffsetCount) {
7465                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7466                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7467                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7468                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7469                                    "is %u. It should exactly match the number of dynamic descriptors.",
7470                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7471            }
7472            // Save dynamicOffsets bound to this CB
7473            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7474                pCB->lastBound[pipelineBindPoint].dynamicOffsets.push_back(pDynamicOffsets[i]);
7475                pCB->dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7476            }
7489        } else {
7490            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7491        }
7492    }
7493    loader_platform_thread_unlock_mutex(&globalLock);
7494    if (VK_FALSE == skipCall)
7495        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7496                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7497}
7498
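// vkCmdBindIndexBuffer() validation: checks the buffer's memory binding (MTMERGE
// path) and that `offset` is aligned to the size implied by indexType (2 bytes
// for VK_INDEX_TYPE_UINT16, 4 bytes for VK_INDEX_TYPE_UINT32). For example, an
// offset of 6 is valid for UINT16 but fails the UINT32 alignment check.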
7499VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7500vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7501    VkBool32 skipCall = VK_FALSE;
7502    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7503    loader_platform_thread_lock_mutex(&globalLock);
7504#if MTMERGE
7505    VkDeviceMemory mem;
7506    skipCall =
7507        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7508    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7509    if (cb_data != dev_data->commandBufferMap.end()) {
7510        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7511        cb_data->second->validate_functions.push_back(function);
7512    }
7513    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7514#endif
7515    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7516    if (pCB) {
7517        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7518        VkDeviceSize offset_align = 0;
7519        switch (indexType) {
7520        case VK_INDEX_TYPE_UINT16:
7521            offset_align = 2;
7522            break;
7523        case VK_INDEX_TYPE_UINT32:
7524            offset_align = 4;
7525            break;
7526        default:
7527            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7528            break;
7529        }
7530        if (!offset_align || (offset % offset_align)) {
7531            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7532                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7533                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7534                                offset, string_VkIndexType(indexType));
7535        }
7536        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7537    }
7538    loader_platform_thread_unlock_mutex(&globalLock);
7539    if (VK_FALSE == skipCall)
7540        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7541}
7542
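// Per-draw resource tracking helpers: updateResourceTracking() records the
// buffers bound at [firstBinding, firstBinding + bindingCount) into the CB's
// currentDrawData, and updateResourceTrackingOnDraw() snapshots that data into
// the CB's drawData list each time a draw is recorded.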
7543void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7544    uint32_t end = firstBinding + bindingCount;
7545    if (pCB->currentDrawData.buffers.size() < end) {
7546        pCB->currentDrawData.buffers.resize(end);
7547    }
7548    for (uint32_t i = 0; i < bindingCount; ++i) {
7549        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7550    }
7551}
7552
7553void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7554
7555VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7556                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7557                                                                  const VkDeviceSize *pOffsets) {
7558    VkBool32 skipCall = VK_FALSE;
7559    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7560    loader_platform_thread_lock_mutex(&globalLock);
7561#if MTMERGE
7562    for (uint32_t i = 0; i < bindingCount; ++i) {
7563        VkDeviceMemory mem;
7564        skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
7565                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7566        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7567        if (cb_data != dev_data->commandBufferMap.end()) {
7568            std::function<VkBool32()> function =
7569                [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7570            cb_data->second->validate_functions.push_back(function);
7571        }
7572    }
7573    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7574#endif
7575    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7576    if (pCB) {
7577        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7578        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7579    } else {
7580        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7581    }
7582    loader_platform_thread_unlock_mutex(&globalLock);
7583    if (VK_FALSE == skipCall)
7584        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7585}
7586
7587#if MTMERGE
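// Marks the memory backing every image view and buffer referenced by the CB's
// active descriptor sets as written, by queuing deferred validate_functions
// that call set_memory_valid() when the command buffer is later validated.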
7588/* expects globalLock to be held by caller */
7589bool markStoreImagesAndBuffersAsWritten(VkCommandBuffer commandBuffer) {
7590    bool skip_call = false;
7591    layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7592    auto cb_data = my_data->commandBufferMap.find(commandBuffer);
7593    if (cb_data == my_data->commandBufferMap.end())
7594        return skip_call;
7595    std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
7596    for (auto descriptorSet : activeDescriptorSets) {
7597        auto ds_data = my_data->descriptorSetMap.find(descriptorSet);
7598        if (ds_data == my_data->descriptorSetMap.end())
7599            continue;
7600        std::vector<VkImageView> images = ds_data->second.images;
7601        std::vector<VkBuffer> buffers = ds_data->second.buffers;
7602        for (auto imageView : images) {
7603            auto iv_data = my_data->imageViewMap.find(imageView);
7604            if (iv_data == my_data->imageViewMap.end())
7605                continue;
7606            VkImage image = iv_data->second.image;
7607            VkDeviceMemory mem;
7608            skip_call |=
7609                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7610            std::function<VkBool32()> function = [=]() {
7611                set_memory_valid(my_data, mem, true, image);
7612                return VK_FALSE;
7613            };
7614            cb_data->second->validate_functions.push_back(function);
7615        }
7616        for (auto buffer : buffers) {
7617            VkDeviceMemory mem;
7618            skip_call |=
7619                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7620            std::function<VkBool32()> function = [=]() {
7621                set_memory_valid(my_data, mem, true);
7622                return VK_FALSE;
7623            };
7624            cb_data->second->validate_functions.push_back(function);
7625        }
7626    }
7627    return skip_call;
7628}
7629#endif
7630
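// vkCmdDraw() validation: records the command, bumps the draw count, validates
// all draw-time state for the bound pipeline and descriptor sets, logs the DS
// state at INFORMATION level, snapshots the buffers used by this draw, and
// checks that the draw is recorded inside a render pass.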
7631VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7632                                                     uint32_t firstVertex, uint32_t firstInstance) {
7633    VkBool32 skipCall = VK_FALSE;
7634    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7635    loader_platform_thread_lock_mutex(&globalLock);
7636#if MTMERGE
7637    // MTMTODO : merge with code below
7638    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7639#endif
7640    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7641    if (pCB) {
7642        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7643        pCB->drawCount[DRAW]++;
7644        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7645        // TODO : Need to pass commandBuffer as srcObj here
7646        skipCall |=
7647            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7648                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7649        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7650        if (VK_FALSE == skipCall) {
7651            updateResourceTrackingOnDraw(pCB);
7652        }
7653        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7654    }
7655    loader_platform_thread_unlock_mutex(&globalLock);
7656    if (VK_FALSE == skipCall)
7657        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7658}
7659
7660VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7661                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7662                                                            uint32_t firstInstance) {
7663    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7664    VkBool32 skipCall = VK_FALSE;
7665    loader_platform_thread_lock_mutex(&globalLock);
7666#if MTMERGE
7667    // MTMTODO : merge with code below
7668    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7669#endif
7670    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7671    if (pCB) {
7672        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7673        pCB->drawCount[DRAW_INDEXED]++;
7674        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7675        // TODO : Need to pass commandBuffer as srcObj here
7676        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7677                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7678                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7679        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7680        if (VK_FALSE == skipCall) {
7681            updateResourceTrackingOnDraw(pCB);
7682        }
7683        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7684    }
7685    loader_platform_thread_unlock_mutex(&globalLock);
7686    if (VK_FALSE == skipCall)
7687        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7688                                                        firstInstance);
7689}
7690
7691VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7692vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7693    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7694    VkBool32 skipCall = VK_FALSE;
7695    loader_platform_thread_lock_mutex(&globalLock);
7696#if MTMERGE
7697    VkDeviceMemory mem;
7698    // MTMTODO : merge with code below
7699    skipCall =
7700        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7701    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7702    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7703#endif
7704    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7705    if (pCB) {
7706        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7707        pCB->drawCount[DRAW_INDIRECT]++;
7708        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7709        // TODO : Need to pass commandBuffer as srcObj here
7710        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7711                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7712                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7713        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7714        if (VK_FALSE == skipCall) {
7715            updateResourceTrackingOnDraw(pCB);
7716        }
7717        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7718    }
7719    loader_platform_thread_unlock_mutex(&globalLock);
7720    if (VK_FALSE == skipCall)
7721        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7722}
7723
7724VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7725vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7726    VkBool32 skipCall = VK_FALSE;
7727    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7728    loader_platform_thread_lock_mutex(&globalLock);
7729#if MTMERGE
7730    VkDeviceMemory mem;
7731    // MTMTODO : merge with code below
7732    skipCall =
7733        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7734    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7735    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7736#endif
7737    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7738    if (pCB) {
7739        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7740        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7742        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7744        // TODO : Need to pass commandBuffer as srcObj here
7745        skipCall |=
7746            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7747                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7748                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7749        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7750        if (VK_FALSE == skipCall) {
7751            updateResourceTrackingOnDraw(pCB);
7752        }
7753        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7754    }
7755    loader_platform_thread_unlock_mutex(&globalLock);
7756    if (VK_FALSE == skipCall)
7757        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7758}
7759
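// Dispatches (like the transfer commands below) must be recorded outside a
// render pass; insideRenderPass() reports an error if one is currently active.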
7760VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7761    VkBool32 skipCall = VK_FALSE;
7762    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7763    loader_platform_thread_lock_mutex(&globalLock);
7764#if MTMERGE
7765    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7766#endif
7767    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7768    if (pCB) {
7769        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7770        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7771    }
7772    loader_platform_thread_unlock_mutex(&globalLock);
7773    if (VK_FALSE == skipCall)
7774        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7775}
7776
7777VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7778vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7779    VkBool32 skipCall = VK_FALSE;
7780    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7781    loader_platform_thread_lock_mutex(&globalLock);
7782#if MTMERGE
7783    VkDeviceMemory mem;
7784    skipCall =
7785        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7786    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7787    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7788#endif
7789    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7790    if (pCB) {
7791        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7792        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7793    }
7794    loader_platform_thread_unlock_mutex(&globalLock);
7795    if (VK_FALSE == skipCall)
7796        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7797}
7798
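// vkCmdCopyBuffer() validation: resolves the memory bound to both buffers,
// queues a deferred check that the source memory contains valid data, marks the
// destination memory as written, verifies TRANSFER_SRC/TRANSFER_DST usage flags,
// and confirms the copy is recorded outside a render pass.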
7799VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7800                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
7801    VkBool32 skipCall = VK_FALSE;
7802    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7803    loader_platform_thread_lock_mutex(&globalLock);
7804#if MTMERGE
7805    VkDeviceMemory mem;
7806    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7808    skipCall =
7809        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7810    if (cb_data != dev_data->commandBufferMap.end()) {
7811        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7812        cb_data->second->validate_functions.push_back(function);
7813    }
7814    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7815    skipCall |=
7816        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7817    if (cb_data != dev_data->commandBufferMap.end()) {
7818        std::function<VkBool32()> function = [=]() {
7819            set_memory_valid(dev_data, mem, true);
7820            return VK_FALSE;
7821        };
7822        cb_data->second->validate_functions.push_back(function);
7823    }
7824    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7825    // Validate that SRC & DST buffers have correct usage flags set
7826    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7827                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7828    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7829                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7830#endif
7831    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7832    if (pCB) {
7833        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7834        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7835    }
7836    loader_platform_thread_unlock_mutex(&globalLock);
7837    if (VK_FALSE == skipCall)
7838        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7839}
7840
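// VerifySourceImageLayout()/VerifyDestImageLayout(): for each array layer of the
// referenced subresource, look up the layout tracked in the CB (recording it on
// first use) and report a mismatch against the layout the caller specified.
// GENERAL is accepted for transfers but earns a performance warning versus the
// TRANSFER_SRC/DST_OPTIMAL layouts; any other layout is an error.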
7841VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7842                                 VkImageLayout srcImageLayout) {
7843    VkBool32 skip_call = VK_FALSE;
7844
7845    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7846    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7847    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7848        uint32_t layer = i + subLayers.baseArrayLayer;
7849        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7850        IMAGE_CMD_BUF_LAYOUT_NODE node;
7851        if (!FindLayout(pCB, srcImage, sub, node)) {
7852            SetLayout(pCB, srcImage, sub, {srcImageLayout, srcImageLayout});
7853            continue;
7854        }
7855        if (node.layout != srcImageLayout) {
7856            // TODO: Improve log message in the next pass
7857            skip_call |=
7858                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7859                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose specified source "
7860                                                                        "layout (%s) does not match its current layout (%s).",
7861                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7862        }
7863    }
7864    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7865        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7866            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7867            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7868                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7869                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7870        } else {
7871            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7872                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7873                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7874                                 string_VkImageLayout(srcImageLayout));
7875        }
7876    }
7877    return skip_call;
7878}
7879
7880VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7881                               VkImageLayout destImageLayout) {
7882    VkBool32 skip_call = VK_FALSE;
7883
7884    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7885    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7886    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7887        uint32_t layer = i + subLayers.baseArrayLayer;
7888        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7889        IMAGE_CMD_BUF_LAYOUT_NODE node;
7890        if (!FindLayout(pCB, destImage, sub, node)) {
7891            SetLayout(pCB, destImage, sub, {destImageLayout, destImageLayout});
7892            continue;
7893        }
7894        if (node.layout != destImageLayout) {
7895            skip_call |=
7896                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7897                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose specified destination "
7898                                                                        "layout (%s) does not match its current layout (%s).",
7899                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7900        }
7901    }
7902    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7903        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7904            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7905            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7906                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7907                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7908        } else {
7909            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7910                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7911                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7912                                 string_VkImageLayout(destImageLayout));
7913        }
7914    }
7915    return skip_call;
7916}
7917
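// vkCmdCopyImage() validation: checks that the source image's memory holds valid
// data, marks the destination image's memory as written, verifies transfer usage
// flags on both images, and validates the subresource layouts of every region.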
7918VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7919vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7920               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7921    VkBool32 skipCall = VK_FALSE;
7922    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7923    loader_platform_thread_lock_mutex(&globalLock);
7924#if MTMERGE
7925    VkDeviceMemory mem;
7926    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7927    // Validate that src & dst images have correct usage flags set
7928    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7929    if (cb_data != dev_data->commandBufferMap.end()) {
7930        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
7931        cb_data->second->validate_functions.push_back(function);
7932    }
7933    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7934    skipCall |=
7935        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7936    if (cb_data != dev_data->commandBufferMap.end()) {
7937        std::function<VkBool32()> function = [=]() {
7938            set_memory_valid(dev_data, mem, true, dstImage);
7939            return VK_FALSE;
7940        };
7941        cb_data->second->validate_functions.push_back(function);
7942    }
7943    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7944    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7945                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7946    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7947                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7948#endif
7949    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7950    if (pCB) {
7951        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
7952        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
7953        for (uint32_t i = 0; i < regionCount; ++i) {
7954            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7955            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7956        }
7957    }
7958    loader_platform_thread_unlock_mutex(&globalLock);
7959    if (VK_FALSE == skipCall)
7960        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7961                                                      regionCount, pRegions);
7962}
7963
7964VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7965vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7966               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7967    VkBool32 skipCall = VK_FALSE;
7968    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7969    loader_platform_thread_lock_mutex(&globalLock);
7970#if MTMERGE
7971    VkDeviceMemory mem;
7972    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7973    // Validate that src & dst images have correct usage flags set
7974    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7975    if (cb_data != dev_data->commandBufferMap.end()) {
7976        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
7977        cb_data->second->validate_functions.push_back(function);
7978    }
7979    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7980    skipCall |=
7981        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7982    if (cb_data != dev_data->commandBufferMap.end()) {
7983        std::function<VkBool32()> function = [=]() {
7984            set_memory_valid(dev_data, mem, true, dstImage);
7985            return VK_FALSE;
7986        };
7987        cb_data->second->validate_functions.push_back(function);
7988    }
7989    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7990    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7991                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7992    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7993                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7994#endif
7995    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7996    if (pCB) {
7997        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
7998        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
7999    }
8000    loader_platform_thread_unlock_mutex(&globalLock);
8001    if (VK_FALSE == skipCall)
8002        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8003                                                      regionCount, pRegions, filter);
8004}
8005
8006VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8007                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
8008                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8009    VkBool32 skipCall = VK_FALSE;
8010    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8011    loader_platform_thread_lock_mutex(&globalLock);
8012#if MTMERGE
8013    VkDeviceMemory mem;
8014    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8015    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8016    if (cb_data != dev_data->commandBufferMap.end()) {
8017        std::function<VkBool32()> function = [=]() {
8018            set_memory_valid(dev_data, mem, true, dstImage);
8019            return VK_FALSE;
8020        };
8021        cb_data->second->validate_functions.push_back(function);
8022    }
8023    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
8024    skipCall |=
8025        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8026    if (cb_data != dev_data->commandBufferMap.end()) {
8027        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
8028        cb_data->second->validate_functions.push_back(function);
8029    }
8030    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
8031    // Validate that src buff & dst image have correct usage flags set
8032    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8033                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8034    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8035                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8036#endif
8037    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8038    if (pCB) {
8039        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8040        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
8041        for (uint32_t i = 0; i < regionCount; ++i) {
8042            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8043        }
8044    }
8045    loader_platform_thread_unlock_mutex(&globalLock);
8046    if (VK_FALSE == skipCall)
8047        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
8048                                                              pRegions);
8049}
8050
8051VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8052                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8053                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8054    VkBool32 skipCall = VK_FALSE;
8055    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8056    loader_platform_thread_lock_mutex(&globalLock);
8057#if MTMERGE
8058    VkDeviceMemory mem;
8059    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8060    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8061    if (cb_data != dev_data->commandBufferMap.end()) {
8062        std::function<VkBool32()> function =
8063            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
8064        cb_data->second->validate_functions.push_back(function);
8065    }
8066    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
8067    skipCall |=
8068        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8069    if (cb_data != dev_data->commandBufferMap.end()) {
8070        std::function<VkBool32()> function = [=]() {
8071            set_memory_valid(dev_data, mem, true);
8072            return VK_FALSE;
8073        };
8074        cb_data->second->validate_functions.push_back(function);
8075    }
8076    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
8077    // Validate that dst buff & src image have correct usage flags set
8078    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8079                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8080    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8081                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8082#endif
8083    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8084    if (pCB) {
8085        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8086        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
8087        for (uint32_t i = 0; i < regionCount; ++i) {
8088            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8089        }
8090    }
8091    loader_platform_thread_unlock_mutex(&globalLock);
8092    if (VK_FALSE == skipCall)
8093        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
8094                                                              pRegions);
8095}
8096
8097VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8098                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8099    VkBool32 skipCall = VK_FALSE;
8100    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8101    loader_platform_thread_lock_mutex(&globalLock);
8102#if MTMERGE
8103    VkDeviceMemory mem;
8104    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8105    skipCall =
8106        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8107    if (cb_data != dev_data->commandBufferMap.end()) {
8108        std::function<VkBool32()> function = [=]() {
8109            set_memory_valid(dev_data, mem, true);
8110            return VK_FALSE;
8111        };
8112        cb_data->second->validate_functions.push_back(function);
8113    }
8114    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
8115    // Validate that dst buff has correct usage flags set
8116    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8117                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8118#endif
8119    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8120    if (pCB) {
8121        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8122        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyUpdateBuffer");
8123    }
8124    loader_platform_thread_unlock_mutex(&globalLock);
8125    if (VK_FALSE == skipCall)
8126        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8127}
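
// NOTE: vkCmdUpdateBuffer copies the pData payload inline into the command buffer. Per the
// Vulkan spec, dstOffset and dataSize must be multiples of 4 and dataSize may not exceed
// 65536 bytes; this layer does not check those limits here, only usage flags and render pass
// state. A hedged example of a conforming call (handles are placeholders):
//
//     const uint32_t payload[4] = {0, 1, 2, 3};
//     vkCmdUpdateBuffer(cmdBuf, uniformBuffer, /*dstOffset*/ 0, sizeof(payload), payload);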
8128
8129VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8130vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8131    VkBool32 skipCall = VK_FALSE;
8132    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8133    loader_platform_thread_lock_mutex(&globalLock);
8134#if MTMERGE
8135    VkDeviceMemory mem;
8136    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8137    skipCall =
8138        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8139    if (cb_data != dev_data->commandBufferMap.end()) {
8140        std::function<VkBool32()> function = [=]() {
8141            set_memory_valid(dev_data, mem, true);
8142            return VK_FALSE;
8143        };
8144        cb_data->second->validate_functions.push_back(function);
8145    }
8146    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
8147    // Validate that dst buff has correct usage flags set
8148    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8149                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8150#endif
8151    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8152    if (pCB) {
8153        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8154        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyFillBuffer");
8155    }
8156    loader_platform_thread_unlock_mutex(&globalLock);
8157    if (VK_FALSE == skipCall)
8158        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8159}
8160
8161VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8162                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
8163                                                                 const VkClearRect *pRects) {
8164    VkBool32 skipCall = VK_FALSE;
8165    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8166    loader_platform_thread_lock_mutex(&globalLock);
8167    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8168    if (pCB) {
8169        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8170        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8171        if (!hasDrawCmd(pCB) && rectCount &&
8172            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8173            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8174            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
8175            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
8176            // call CmdClearAttachments. Otherwise this seems more like a performance warning.
8177            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8178                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
8179                                DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8180                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8181                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8182                                (uint64_t)(commandBuffer));
8183        }
8184        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
8185    }
8186
8187    // Validate that attachment is in reference list of active subpass
8188    if (pCB && pCB->activeRenderPass) {
8189        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
8190        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8191
8192        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8193            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8194            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8195                VkBool32 found = VK_FALSE;
8196                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
8197                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
8198                        found = VK_TRUE;
8199                        break;
8200                    }
8201                }
8202                if (VK_FALSE == found) {
8203                    skipCall |= log_msg(
8204                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8205                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8206                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
8207                        attachment->colorAttachment, pCB->activeSubpass);
8208                }
8209            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8210                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8211                    (pSD->pDepthStencilAttachment->attachment ==
8212                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
8213
8214                    skipCall |= log_msg(
8215                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8216                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8217                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
8218                        "in active subpass %d",
8219                        attachment->colorAttachment,
8220                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
8221                        pCB->activeSubpass);
8222                }
8223            }
8224        }
8225    }
8226    loader_platform_thread_unlock_mutex(&globalLock);
8227    if (VK_FALSE == skipCall)
8228        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8229}
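
// NOTE: The performance warning above suggests clearing via the render pass rather than an
// explicit vkCmdClearAttachments before the first draw. A hedged sketch of that alternative,
// clearing a color attachment at the start of the pass (other fields elided/assumed):
//
//     VkAttachmentDescription color = {};
//     color.format = VK_FORMAT_B8G8R8A8_UNORM;              // assumed swapchain format
//     color.samples = VK_SAMPLE_COUNT_1_BIT;
//     color.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;           // clear on render pass begin
//     color.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
//     color.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     color.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     // The clear value itself is then supplied in VkRenderPassBeginInfo::pClearValues.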
8230
8231VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8232                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
8233                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8234    VkBool32 skipCall = VK_FALSE;
8235    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8236    loader_platform_thread_lock_mutex(&globalLock);
8237#if MTMERGE
8238    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8239    VkDeviceMemory mem;
8240    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8241    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8242    if (cb_data != dev_data->commandBufferMap.end()) {
8243        std::function<VkBool32()> function = [=]() {
8244            set_memory_valid(dev_data, mem, true, image);
8245            return VK_FALSE;
8246        };
8247        cb_data->second->validate_functions.push_back(function);
8248    }
8249    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
8250#endif
8251    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8252    if (pCB) {
8253        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8254        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
8255    }
8256    loader_platform_thread_unlock_mutex(&globalLock);
8257    if (VK_FALSE == skipCall)
8258        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8259}
8260
8261VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8262vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8263                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8264                            const VkImageSubresourceRange *pRanges) {
8265    VkBool32 skipCall = VK_FALSE;
8266    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8267    loader_platform_thread_lock_mutex(&globalLock);
8268#if MTMERGE
8269    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8270    VkDeviceMemory mem;
8271    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8272    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8273    if (cb_data != dev_data->commandBufferMap.end()) {
8274        std::function<VkBool32()> function = [=]() {
8275            set_memory_valid(dev_data, mem, true, image);
8276            return VK_FALSE;
8277        };
8278        cb_data->second->validate_functions.push_back(function);
8279    }
8280    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
8281#endif
8282    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8283    if (pCB) {
8284        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8285        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
8286    }
8287    loader_platform_thread_unlock_mutex(&globalLock);
8288    if (VK_FALSE == skipCall)
8289        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
8290                                                                   pRanges);
8291}
8292
8293VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8294vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8295                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8296    VkBool32 skipCall = VK_FALSE;
8297    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8298    loader_platform_thread_lock_mutex(&globalLock);
8299#if MTMERGE
8300    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8301    VkDeviceMemory mem;
8302    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8303    if (cb_data != dev_data->commandBufferMap.end()) {
8304        std::function<VkBool32()> function =
8305            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
8306        cb_data->second->validate_functions.push_back(function);
8307    }
8308    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8309    skipCall |=
8310        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8311    if (cb_data != dev_data->commandBufferMap.end()) {
8312        std::function<VkBool32()> function = [=]() {
8313            set_memory_valid(dev_data, mem, true, dstImage);
8314            return VK_FALSE;
8315        };
8316        cb_data->second->validate_functions.push_back(function);
8317    }
8318    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8319#endif
8320    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8321    if (pCB) {
8322        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8323        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
8324    }
8325    loader_platform_thread_unlock_mutex(&globalLock);
8326    if (VK_FALSE == skipCall)
8327        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8328                                                         regionCount, pRegions);
8329}
8330
8331VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8332vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8333    VkBool32 skipCall = VK_FALSE;
8334    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8335    loader_platform_thread_lock_mutex(&globalLock);
8336    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8337    if (pCB) {
8338        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8339        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
8340        pCB->events.push_back(event);
8341        pCB->eventToStageMap[event] = stageMask;
8342    }
8343    loader_platform_thread_unlock_mutex(&globalLock);
8344    if (VK_FALSE == skipCall)
8345        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
8346}
8347
8348VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8349vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8350    VkBool32 skipCall = VK_FALSE;
8351    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8352    loader_platform_thread_lock_mutex(&globalLock);
8353    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8354    if (pCB) {
8355        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8356        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
8357        pCB->events.push_back(event);
8358    }
8359    loader_platform_thread_unlock_mutex(&globalLock);
8360    if (VK_FALSE == skipCall)
8361        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
8362}
8363
8364VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
8365    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8366    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8367    VkBool32 skip = VK_FALSE;
8368    uint32_t levelCount = 0;
8369    uint32_t layerCount = 0;
8370
8371    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8372        auto mem_barrier = &pImgMemBarriers[i];
8373        if (!mem_barrier)
8374            continue;
8375        // TODO: Do not iterate over every possibility - consolidate where
8376        // possible
8377        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8378
8379        for (uint32_t j = 0; j < levelCount; j++) {
8380            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8381            for (uint32_t k = 0; k < layerCount; k++) {
8382                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8383                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8384                IMAGE_CMD_BUF_LAYOUT_NODE node;
8385                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8386                    SetLayout(pCB, mem_barrier->image, sub, {mem_barrier->oldLayout, mem_barrier->newLayout});
8387                    continue;
8388                }
8389                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8390                    // TODO: Set memory invalid which is in mem_tracker currently
8391                } else if (node.layout != mem_barrier->oldLayout) {
8392                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8393                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8394                                                                                    "when current layout is %s.",
8395                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8396                }
8397                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8398            }
8399        }
8400    }
8401    return skip;
8402}
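
// NOTE: TransitionImageLayouts above records layout state per (aspect, mip level, array layer)
// subresource. The first barrier seen for a subresource in this command buffer seeds the
// tracked state with {oldLayout, newLayout}; later barriers must name the tracked layout as
// their oldLayout (UNDEFINED is exempt, since it discards contents) or an error is logged.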
8403
8404// Print readable FlagBits in FlagMask
8405std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8406    std::string result;
8407    std::string separator;
8408
8409    if (accessMask == 0) {
8410        result = "[None]";
8411    } else {
8412        result = "[";
8413        for (uint32_t i = 0; i < 32; i++) {
8414            if (accessMask & (1u << i)) {
8415                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8416                separator = " | ";
8417            }
8418        }
8419        result = result + "]";
8420    }
8421    return result;
8422}
8423
8424// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8425// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8426// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8427VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8428                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
8429    VkBool32 skip_call = VK_FALSE;
8430
8431    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8432        if (accessMask & ~(required_bit | optional_bits)) {
8433            // TODO: Verify against Valid Use
8434            skip_call |=
8435                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8436                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8437                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8438        }
8439    } else {
8440        if (!required_bit) {
8441            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8442                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8443                                                                  "%s when layout is %s, unless the app has previously added a "
8444                                                                  "barrier for this transition.",
8445                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8446                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8447        } else {
8448            std::string opt_bits;
8449            if (optional_bits != 0) {
8450                std::stringstream ss;
8451                ss << optional_bits;
8452                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8453            }
8454            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8455                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8456                                                                  "layout is %s, unless the app has previously added a barrier for "
8457                                                                  "this transition.",
8458                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8459                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8460        }
8461    }
8462    return skip_call;
8463}
8464
8465VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8466                                     const VkImageLayout &layout, const char *type) {
8467    VkBool32 skip_call = VK_FALSE;
8468    switch (layout) {
8469    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8470        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8471                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8472        break;
8473    }
8474    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8475        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8476                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8477        break;
8478    }
8479    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8480        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8481        break;
8482    }
8483    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8484        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8485        break;
8486    }
8487    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8488        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8489                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8490        break;
8491    }
8492    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8493        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8494                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8495        break;
8496    }
8497    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8498        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8499        break;
8500    }
8501    case VK_IMAGE_LAYOUT_UNDEFINED: {
8502        if (accessMask != 0) {
8503            // TODO: Verify against Valid Use section spec
8504            skip_call |=
8505                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8506                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8507                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8508        }
8509        break;
8510    }
8511    case VK_IMAGE_LAYOUT_GENERAL:
8512    default: { break; }
8513    }
8514    return skip_call;
8515}
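
// NOTE: A hedged example of a barrier that satisfies ValidateMaskBitsFromLayouts for an
// UNDEFINED -> TRANSFER_DST_OPTIMAL transition (image handle and range are placeholders):
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = 0;                                  // UNDEFINED permits no bits
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;       // required for TRANSFER_DST
//     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};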
8516
8517VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8518                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8519                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8520                          const VkImageMemoryBarrier *pImageMemBarriers) {
8521    VkBool32 skip_call = VK_FALSE;
8522    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8523    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8524    if (pCB->activeRenderPass && memBarrierCount) {
8525        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
8526            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8527                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8528                                                                  "with no self dependency specified.",
8529                                 funcName, pCB->activeSubpass);
8530        }
8531    }
8532    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8533        auto mem_barrier = &pImageMemBarriers[i];
8534        auto image_data = dev_data->imageMap.find(mem_barrier->image);
8535        if (image_data != dev_data->imageMap.end()) {
8536            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8537            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8538            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8539                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8540                // be VK_QUEUE_FAMILY_IGNORED
8541                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8542                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8543                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8544                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8545                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
8546                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8547                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8548                }
8549            } else {
8550                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8551                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8552                // or both be a valid queue family
8553                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8554                    (src_q_f_index != dst_q_f_index)) {
8555                    skip_call |=
8556                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8557                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8558                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8559                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8560                                                                     "must be.",
8561                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8562                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8563                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
8564                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
8565                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8566                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8567                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8568                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8569                                         " or dstQueueFamilyIndex %d is greater than the number of queueFamilies ("
8570                                         PRINTF_SIZE_T_SPECIFIER ") created for this device.",
8571                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8572                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
8573                }
8574            }
8575        }
8576
8577        if (mem_barrier) {
8578            skip_call |=
8579                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8580            skip_call |=
8581                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8582            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8583                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8584                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8585                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
8586                                     funcName);
8587            }
8588            auto image_data = dev_data->imageMap.find(mem_barrier->image);
8589            VkFormat format;
8590            uint32_t arrayLayers, mipLevels;
8591            bool imageFound = false;
8592            if (image_data != dev_data->imageMap.end()) {
8593                format = image_data->second.createInfo.format;
8594                arrayLayers = image_data->second.createInfo.arrayLayers;
8595                mipLevels = image_data->second.createInfo.mipLevels;
8596                imageFound = true;
8597            } else if (dev_data->device_extensions.wsi_enabled) {
8598                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
8599                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
8600                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
8601                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
8602                        format = swapchain_data->second->createInfo.imageFormat;
8603                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
8604                        mipLevels = 1;
8605                        imageFound = true;
8606                    }
8607                }
8608            }
8609            if (imageFound) {
8610                if (vk_format_is_depth_and_stencil(format) &&
8611                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
8612                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
8613                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8614                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8615                                         "%s: Image is a depth and stencil format and thus must have both "
8616                                         "VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
8617                                         funcName);
8618                }
8619                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8620                                     ? 1
8621                                     : mem_barrier->subresourceRange.layerCount;
8622                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8623                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8624                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8625                                         "%s: Subresource must have the sum of the baseArrayLayer (%d) and layerCount (%d) be "
8626                                         "less than or equal to the total number of layers (%d).",
8627                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
8628                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
8629                }
8630                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8631                                     ? 1
8632                                     : mem_barrier->subresourceRange.levelCount;
8633                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8634                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8635                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8636                                         "%s: Subresource must have the sum of the baseMipLevel (%d) and levelCount (%d) be "
8637                                         "less than or equal to the total number of levels (%d).",
8638                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
8639                                         mem_barrier->subresourceRange.levelCount, mipLevels);
8640                }
8641            }
8642        }
8643    }
8644    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8645        auto mem_barrier = &pBufferMemBarriers[i];
8646        if (pCB->activeRenderPass) {
8647            skip_call |=
8648                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8649                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8650        }
8651        if (!mem_barrier)
8652            continue;
8653
8654        // Validate buffer barrier queue family indices
8655        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8656             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
8657            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8658             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
8659            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8660                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8661                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8662                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8663                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8664                                 dev_data->physDevProperties.queue_family_properties.size());
8665        }
8666
8667        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
8668        if (buffer_data != dev_data->bufferMap.end()) {
8669            uint64_t buffer_size =
8670                buffer_data->second.create_info ? reinterpret_cast<uint64_t &>(buffer_data->second.create_info->size) : 0;
8671            if (mem_barrier->offset >= buffer_size) {
8672                skip_call |=
8673                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8674                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
8675                                                             " whose sum is not less than total size %" PRIu64 ".",
8676                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8677                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
8678            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8679                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8680                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8681                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
8682                                     " whose sum is greater than total size %" PRIu64 ".",
8683                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8684                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
8685                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
8686            }
8687        }
8688    }
8689    return skip_call;
8690}
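
// NOTE: ValidateBarriers encodes the queue-ownership rules checked above: CONCURRENT images
// must use VK_QUEUE_FAMILY_IGNORED on both sides, while EXCLUSIVE resources either ignore
// both indices (no ownership transfer) or name two valid queue families (a release/acquire
// pair, with a matching barrier recorded on a command buffer for each queue).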
8691
8692VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8693vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8694                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8695                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8696                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8697    VkBool32 skipCall = VK_FALSE;
8698    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8699    loader_platform_thread_lock_mutex(&globalLock);
8700    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8701    if (pCB) {
8702        VkPipelineStageFlags stageMask = 0;
8703        for (uint32_t i = 0; i < eventCount; ++i) {
8704            pCB->waitedEvents.push_back(pEvents[i]);
8705            pCB->events.push_back(pEvents[i]);
8706            auto event_data = pCB->eventToStageMap.find(pEvents[i]);
8707            if (event_data != pCB->eventToStageMap.end()) {
8708                stageMask |= event_data->second;
8709            } else {
8710                auto global_event_data = dev_data->eventMap.find(pEvents[i]);
8711                if (global_event_data == dev_data->eventMap.end()) {
8712                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8713                                        reinterpret_cast<const uint64_t &>(pEvents[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
8714                                        "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8715                                        reinterpret_cast<const uint64_t &>(pEvents[i]));
8716                } else {
8717                    stageMask |= global_event_data->second.stageMask;
8718                }
8719            }
8720        }
8721        if (sourceStageMask != stageMask) {
8722            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8723                                DRAWSTATE_INVALID_FENCE, "DS", "srcStageMask in vkCmdWaitEvents must be the bitwise OR of the "
8724                                                               "stageMask parameters used in calls to vkCmdSetEvent and "
8725                                                               "VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.");
8726        }
8727        if (pCB->state == CB_RECORDING) {
8728            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8729        } else {
8730            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8731        }
8732        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8733        skipCall |=
8734            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8735                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8736    }
8737    loader_platform_thread_unlock_mutex(&globalLock);
8738    if (VK_FALSE == skipCall)
8739        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8740                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8741                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8742}
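
// NOTE: The srcStageMask check above enforces the rule that vkCmdWaitEvents' sourceStageMask
// equals the OR of the stageMasks the waited events were set with. A worked example: if
// eventA was set with VK_PIPELINE_STAGE_TRANSFER_BIT and eventB with
// VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, waiting on both requires
// sourceStageMask == (VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT).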
8743
8744VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8745vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8746                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8747                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8748                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8749    VkBool32 skipCall = VK_FALSE;
8750    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8751    loader_platform_thread_lock_mutex(&globalLock);
8752    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8753    if (pCB) {
8754        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8755        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8756        skipCall |=
8757            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8758                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8759    }
8760    loader_platform_thread_unlock_mutex(&globalLock);
8761    if (VK_FALSE == skipCall)
8762        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8763                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8764                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8765}
8766
8767VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8768vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8769    VkBool32 skipCall = VK_FALSE;
8770    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8771    loader_platform_thread_lock_mutex(&globalLock);
8772    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8773    if (pCB) {
8774        QueryObject query = {queryPool, slot};
8775        pCB->activeQueries.insert(query);
8776        if (!pCB->startedQueries.count(query)) {
8777            pCB->startedQueries.insert(query);
8778        }
8779        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8780    }
8781    loader_platform_thread_unlock_mutex(&globalLock);
8782    if (VK_FALSE == skipCall)
8783        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8784}
8785
8786VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8787    VkBool32 skipCall = VK_FALSE;
8788    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8789    loader_platform_thread_lock_mutex(&globalLock);
8790    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8791    if (pCB) {
8792        QueryObject query = {queryPool, slot};
8793        if (!pCB->activeQueries.count(query)) {
8794            skipCall |=
8795                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8796                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
8797                        (uint64_t)(queryPool), slot);
8798        } else {
8799            pCB->activeQueries.erase(query);
8800        }
8801        pCB->queryToStateMap[query] = 1;
8802        if (pCB->state == CB_RECORDING) {
8803            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "VkCmdEndQuery()");
8804        } else {
8805            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8806        }
8807    }
8808    loader_platform_thread_unlock_mutex(&globalLock);
8809    if (VK_FALSE == skipCall)
8810        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8811}
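
// NOTE: The query entry points above and below maintain a simple per-query lifecycle in
// queryToStateMap: vkCmdResetQueryPool stores 0 (reset/unavailable), vkCmdEndQuery and
// vkCmdWriteTimestamp store 1 (results available), and vkCmdCopyQueryPoolResults flags an
// error if a copied query is still in state 0.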
8812
8813VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8814vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8815    VkBool32 skipCall = VK_FALSE;
8816    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8817    loader_platform_thread_lock_mutex(&globalLock);
8818    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8819    if (pCB) {
8820        for (uint32_t i = 0; i < queryCount; i++) {
8821            QueryObject query = {queryPool, firstQuery + i};
8822            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8823            pCB->queryToStateMap[query] = 0;
8824        }
8825        if (pCB->state == CB_RECORDING) {
8826            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()");
8827        } else {
8828            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8829        }
8830        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdQueryPool");
8831    }
8832    loader_platform_thread_unlock_mutex(&globalLock);
8833    if (VK_FALSE == skipCall)
8834        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8835}
8836
8837VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8838vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8839                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8840    VkBool32 skipCall = VK_FALSE;
8841    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8842    loader_platform_thread_lock_mutex(&globalLock);
8843    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8844#if MTMERGE
8845    VkDeviceMemory mem;
8846    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8847    skipCall |=
8848        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8849    if (cb_data != dev_data->commandBufferMap.end()) {
8850        std::function<VkBool32()> function = [=]() {
8851            set_memory_valid(dev_data, mem, true);
8852            return VK_FALSE;
8853        };
8854        cb_data->second->validate_functions.push_back(function);
8855    }
8856    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8857    // Validate that DST buffer has correct usage flags set
8858    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8859                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8860#endif
8861    if (pCB) {
8862        for (uint32_t i = 0; i < queryCount; i++) {
8863            QueryObject query = {queryPool, firstQuery + i};
8864            if (!pCB->queryToStateMap[query]) {
8865                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8866                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8867                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
8868                                    (uint64_t)(queryPool), firstQuery + i);
8869            }
8870        }
8871        if (pCB->state == CB_RECORDING) {
8872            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8873        } else {
8874            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8875        }
8876        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8877    }
8878    loader_platform_thread_unlock_mutex(&globalLock);
8879    if (VK_FALSE == skipCall)
8880        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8881                                                                 dstOffset, stride, flags);
8882}
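
// NOTE: Beyond the state tracked above, the spec also constrains the copy itself: dstOffset
// must be a multiple of 4, and stride a multiple of 4 (of 8 when VK_QUERY_RESULT_64_BIT is
// set). Those alignment rules are not checked by this layer here. A hedged example of a
// conforming call copying two 64-bit results (handles are placeholders):
//
//     vkCmdCopyQueryPoolResults(cmdBuf, queryPool, /*firstQuery*/ 0, /*queryCount*/ 2,
//                               resultsBuffer, /*dstOffset*/ 0, /*stride*/ sizeof(uint64_t),
//                               VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);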

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                                              const void *pValues) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
    }
    // TODO : Add warning if push constant update doesn't align with range
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}
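
// vkCmdPushConstants() updates bytes [offset, offset + size) of the pipeline layout's
// push-constant space, so the range check above is plain interval arithmetic against
// VkPhysicalDeviceLimits::maxPushConstantsSize. Illustrative sketch; cmdBuf and layout are
// placeholder handles, and 128 is the spec-guaranteed minimum limit, not a queried value:
#if 0
VkCommandBuffer cmdBuf = VK_NULL_HANDLE;  // assume a command buffer in the recording state
VkPipelineLayout layout = VK_NULL_HANDLE; // assume a layout with a matching push-constant range
float data[16] = {};                      // 64 bytes of client data
// In range: bytes [64, 128) fit, since 64 + 64 <= 128.
vkCmdPushConstants(cmdBuf, layout, VK_SHADER_STAGE_VERTEX_BIT, 64, sizeof(data), data);
// Out of range: bytes [96, 160) exceed the limit (96 + 64 > 128) and trip the check above.
vkCmdPushConstants(cmdBuf, layout, VK_SHADER_STAGE_VERTEX_BIT, 96, sizeof(data), data);
#endif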

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->queryToStateMap[query] = 1;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    if (VK_SUCCESS == result) {
        // Shadow create info and store in map
        VkFramebufferCreateInfo *localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
        if (pCreateInfo->pAttachments) {
            localFBCI->pAttachments = new VkImageView[localFBCI->attachmentCount];
            memcpy((void *)localFBCI->pAttachments, pCreateInfo->pAttachments, localFBCI->attachmentCount * sizeof(VkImageView));
        }
        FRAMEBUFFER_NODE fbNode = {};
        fbNode.createInfo = *localFBCI;
        std::pair<VkFramebuffer, FRAMEBUFFER_NODE> fbPair(*pFramebuffer, fbNode);
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkImageView view = pCreateInfo->pAttachments[i];
            auto view_data = dev_data->imageViewMap.find(view);
            if (view_data == dev_data->imageViewMap.end()) {
                continue;
            }
            MT_FB_ATTACHMENT_INFO fb_info;
            get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        &fb_info.mem);
            fb_info.image = view_data->second.image;
            dev_data->fbMap[*pFramebuffer].attachments.push_back(fb_info);
        }
#endif
        dev_data->frameBufferMap.insert(fbPair);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                        std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (processed_nodes.count(index))
        return VK_FALSE;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return VK_TRUE;
        }
    } else {
        return VK_TRUE;
    }
    return VK_FALSE;
}
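
// FindDependency() is a depth-first search backwards along each node's prev edges, with
// processed_nodes guarding against revisiting (and against cycles in malformed graphs).
// Illustrative walk-through on a hand-built three-node chain; this assumes DAGNode is just the
// {pass, prev, next} aggregate used above:
#if 0
std::vector<DAGNode> graph(3);
graph[0].pass = 0;
graph[1].pass = 1;
graph[1].prev.push_back(0); // subpass 1 depends on subpass 0
graph[2].pass = 2;
graph[2].prev.push_back(1); // subpass 2 depends on subpass 1
std::unordered_set<uint32_t> visited;
// A transitive path 2 -> 1 -> 0 exists, so this evaluates to VK_TRUE:
VkBool32 reachable = FindDependency(2, 0, graph, visited);
#endif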

VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                               const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
    VkBool32 result = VK_TRUE;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (subpass == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If so, warn; if not, report an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
                // TODO: Verify against Valid Use section of spec
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
                                     subpass, dependent_subpasses[k]);
            } else {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = VK_FALSE;
            }
        }
    }
    return result;
}

VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                        const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true as subsequent nodes need to preserve it.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return VK_TRUE;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return VK_TRUE;
    }
    VkBool32 result = VK_FALSE;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        VkBool32 has_preserved = VK_FALSE;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = VK_TRUE;
                break;
            }
        }
        if (has_preserved == VK_FALSE) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

// Returns true if the half-open ranges [offset1, offset1 + size1) and [offset2, offset2 + size2)
// share at least one element. The symmetric test below also covers full containment, which the
// earlier two-clause form missed.
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}
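
// A few worked cases for the overlap predicate (illustrative only, not compiled into the layer):
#if 0
#include <cassert>
static void testRangeOverlap() {
    assert(isRangeOverlapping(0u, 10u, 2u, 4u));  // [0,10) fully contains [2,6)
    assert(!isRangeOverlapping(0u, 4u, 4u, 4u));  // [0,4) and [4,8) merely touch
    assert(isRangeOverlapping(3u, 4u, 0u, 4u));   // [3,7) and [0,4) share [3,4)
}
#endif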

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
                              const std::vector<DAGNode> &subpass_to_node) {
    VkBool32 skip_call = VK_FALSE;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_data_i = my_data->imageViewMap.find(viewi);
            auto view_data_j = my_data->imageViewMap.find(viewj);
            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
                continue;
            }
            if (view_data_i->second.image == view_data_j->second.image &&
                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
                continue;
            }
            if (image_data_i->second.mem == image_data_j->second.mem &&
                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
                                   image_data_j->second.memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            other_attachment, attachment);
            }
        }
    }
    // For each attachment, find the subpasses that use it. Skip VK_ATTACHMENT_UNUSED references,
    // which are valid in a subpass description but do not index a real attachment.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
    }
    // Wherever a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Loop through implicit dependencies: if this subpass reads an attachment, make sure it is
    // preserved in every subpass between the one that wrote it and this one.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
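
// ValidateDependencies() flags aliased attachments whose descriptions omit
// VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. Hedged sketch of a description that would pass the
// aliasing check (field values are illustrative):
#if 0
VkAttachmentDescription desc = {};
desc.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT; // required on both attachments that alias
desc.format = VK_FORMAT_R8G8B8A8_UNORM;
desc.samples = VK_SAMPLE_COUNT_1_BIT;
desc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;        // aliased contents are undefined between uses
desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
desc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
#endif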

VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    VkBool32 skip = VK_FALSE;

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
                }
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
                }
            }
        }
        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
                }
            }
        }
    }
    return skip;
}

VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                       std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    VkBool32 skip_call = VK_FALSE;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        }
        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
        }
        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}
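
// Worked example of the DAG construction above: a single dependency {srcSubpass = 0,
// dstSubpass = 1} yields subpass_to_node[1].prev == {0} and subpass_to_node[0].next == {1};
// VK_SUBPASS_EXTERNAL endpoints are recorded on only one side. Illustrative values:
#if 0
VkSubpassDependency dep = {};
dep.srcSubpass = 0;
dep.dstSubpass = 1;
dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
// With subpassCount == 2 and pDependencies == &dep, CreatePassDAG() records the edge 0 -> 1.
#endif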

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkShaderModule *pShaderModule) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skip_call = VK_FALSE;
    if (!shader_is_spirv(pCreateInfo)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
    }

    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return res;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator,
                                                                  VkRenderPass *pRenderPass) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    // Create DAG
    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
    // Validate
    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    if (VK_FALSE != skip_call) {
        // Drop the lock before the early return so later entry points don't deadlock
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
        // MTMTODO : Merge with code from below to eliminate duplication
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
            MT_PASS_ATTACHMENT_INFO pass_info;
            pass_info.load_op = desc.loadOp;
            pass_info.store_op = desc.storeOp;
            pass_info.attachment = i;
            dev_data->passMap[*pRenderPass].attachments.push_back(pass_info);
        }
        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->passMap[*pRenderPass].attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = dev_data->passMap[*pRenderPass].attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, true));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
            }
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
            }
        }
#endif
        // TODOSC : Merge in tracking of renderpass from shader_checker
        // Shadow create info and store in map
        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
        if (pCreateInfo->pAttachments) {
            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
        }
        if (pCreateInfo->pSubpasses) {
            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));

            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
                const uint32_t attachmentCount = subpass->inputAttachmentCount +
                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];

                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
                subpass->pInputAttachments = attachments;
                attachments += subpass->inputAttachmentCount;

                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                subpass->pColorAttachments = attachments;
                attachments += subpass->colorAttachmentCount;

                if (subpass->pResolveAttachments) {
                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                    subpass->pResolveAttachments = attachments;
                    attachments += subpass->colorAttachmentCount;
                }

                if (subpass->pDepthStencilAttachment) {
                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
                    subpass->pDepthStencilAttachment = attachments;
                    attachments += 1;
                }

                // pPreserveAttachments is an array of bare uint32_t indices, not VkAttachmentReference,
                // so copy only preserveAttachmentCount * sizeof(uint32_t) bytes and reuse the front of
                // the remaining block as packed index storage.
                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
                subpass->pPreserveAttachments = &attachments->attachment;
            }
        }
        if (pCreateInfo->pDependencies) {
            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
        }
        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
// Free the renderpass shadow
static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
        return;
    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
        delete[] pRenderPassInfo->pAttachments;
        if (pRenderPassInfo->pSubpasses) {
            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in a single block, so we just need to
                //  find the first non-null pointer (in allocation order) to delete it
                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pDepthStencilAttachment) {
                    delete[] pRenderPassInfo->pSubpasses[i].pDepthStencilAttachment;
                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
                    // Preserve indices live at the start of the same VkAttachmentReference block
                    delete[] reinterpret_cast<const VkAttachmentReference *>(pRenderPassInfo->pSubpasses[i].pPreserveAttachments);
                }
            }
            delete[] pRenderPassInfo->pSubpasses;
        }
        delete[] pRenderPassInfo->pDependencies;
        delete pRenderPassInfo;
        delete (*ii).second;
    }
    my_data->renderPassMap.clear();
}
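
// The per-subpass block freed above was sized in vkCreateRenderPass() as
//   inputCount + colorCount * (1 + hasResolve) + hasDepth + preserveCount
// VkAttachmentReference elements, handed out in that order, which is why deleting the first
// non-null pointer releases the whole allocation. A sketch of the size computation (the helper
// name is illustrative, not a layer API):
#if 0
static size_t shadowAttachmentCount(const VkSubpassDescription &sp) {
    return sp.inputAttachmentCount +
           sp.colorAttachmentCount * (1 + (sp.pResolveAttachments ? 1 : 0)) +
           (sp.pDepthStencilAttachment ? 1 : 0) + sp.preserveAttachmentCount;
}
#endif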

VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto image_data = dev_data->imageViewMap.find(image_view);
        assert(image_data != dev_data->imageViewMap.end());
        const VkImage &image = image_data->second.image;
        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
                                                                    "where the initial layout differs from the starting layout.",
                                i);
                }
            }
        }
    }
    return skip_call;
}

void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (subpass.pInputAttachments[j].attachment == VK_ATTACHMENT_UNUSED)
            continue;
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED)
            continue;
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
    }
    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
    }
}

VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
    VkBool32 skip_call = VK_FALSE;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
                             cmd_name.c_str());
    }
    return skip_call;
}

void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
#if MTMERGE
            auto pass_data = dev_data->passMap.find(pRenderPassBegin->renderPass);
            if (pass_data != dev_data->passMap.end()) {
                MT_PASS_INFO &pass_info = pass_data->second;
                pass_info.fb = pRenderPassBegin->framebuffer;
                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
                for (size_t i = 0; i < pass_info.attachments.size(); ++i) {
                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->fbMap[pass_info.fb].attachments[i];
                    if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                                return VK_FALSE;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                        VkImageLayout &attachment_layout = pass_info.attachment_first_layout[pass_info.attachments[i].attachment];
                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                            skipCall |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %s.",
                                        pass_info.attachments[i].attachment, string_VkImageLayout(attachment_layout));
                        }
                    } else if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                                return VK_FALSE;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    } else if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                    if (pass_info.attachment_first_read[pass_info.attachments[i].attachment]) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                }
            }
#endif
            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (render_pass_data != dev_data->renderPassMap.end()) {
                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
            }
            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            pCB->activeRenderPass = pRenderPassBegin->renderPass;
            // This is a shallow copy as that is all that is needed for now
            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
            pCB->activeSubpass = 0;
            pCB->activeSubpassContents = contents;
            pCB->framebuffer = pRenderPassBegin->framebuffer;
            // Connect this framebuffer to this cmdBuffer
            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
        } else {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
        loader_platform_thread_lock_mutex(&globalLock);
        // This is a shallow copy as that is all that is needed for now
        dev_data->renderPassBeginInfo = *pRenderPassBegin;
        dev_data->currentSubpass = 0;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
    if (pCB) {
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        auto pass_data = dev_data->passMap.find(cb_data->second->activeRenderPass);
        if (pass_data != dev_data->passMap.end()) {
            MT_PASS_INFO &pass_info = pass_data->second;
            for (size_t i = 0; i < pass_info.attachments.size(); ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->fbMap[pass_info.fb].attachments[i];
                if (pass_info.attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        std::function<VkBool32()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                            return VK_FALSE;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                } else if (pass_info.attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        std::function<VkBool32()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                            return VK_FALSE;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                }
            }
        }
    }
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
    if (pCB) {
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
        pCB->activeRenderPass = 0;
        pCB->activeSubpass = 0;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
}

bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
                                 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                   " that is not compatible with the current render pass %" PRIx64 ". "
                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
                   msg);
}

bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                     uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
                                     uint32_t secondaryAttach, bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip_call;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The first is unused while the second is not.");
        return skip_call;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The second is unused while the first is not.");
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different formats.");
    }
    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different samples.");
    }
    if (is_multi &&
        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different flags.");
    }
    return skip_call;
}
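
// Per-attachment render pass compatibility boils down to: both references unused, or matching
// format and sample count (plus matching flags when multiple subpasses are involved). Hedged
// sketch of two descriptions the check above would treat as compatible:
#if 0
VkAttachmentDescription a = {}, b = {};
a.format = b.format = VK_FORMAT_B8G8R8A8_UNORM; // formats must match
a.samples = b.samples = VK_SAMPLE_COUNT_1_BIT;  // sample counts must match
a.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;         // load/store ops may differ freely
b.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
#endif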

bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                  VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
                                                     secondaryPass, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
                                                     secondaryPass, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
                                                     secondaryPass, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
    return skip_call;
}
9803
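// Verify render pass compatibility for vkCmdExecuteCommands(): identical handles are
// trivially compatible; otherwise both passes must be known objects with the same
// subpass count and pairwise-compatible subpasses.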
9804bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9805                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9806    bool skip_call = false;
9807    // Early exit if renderPass objects are identical (and therefore compatible)
9808    if (primaryPass == secondaryPass)
9809        return skip_call;
9810    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9811    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9812    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
9813        skip_call |=
9814            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9815                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9816                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9817                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9818        return skip_call;
9819    }
9820    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
9821        skip_call |=
9822            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9823                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9824                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9825                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9826        return skip_call;
9827    }
9828    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
9829        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9830                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9831                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                             " that is not compatible with the current render pass %" PRIx64 ". "
                             "They have a different number of subpasses.",
9834                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9835        return skip_call;
9836    }
9837    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
9838    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
9839        skip_call |=
9840            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
9841    }
9842    return skip_call;
9843}
9844
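// If the secondary command buffer inherits a framebuffer, it must match the primary's
// current framebuffer, be a known framebuffer object, and have been created with a
// render pass compatible with the inherited one.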
9845bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9846                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9847    bool skip_call = false;
9848    if (!pSubCB->beginInfo.pInheritanceInfo) {
9849        return skip_call;
9850    }
9851    VkFramebuffer primary_fb = pCB->framebuffer;
9852    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9853    if (secondary_fb != VK_NULL_HANDLE) {
9854        if (primary_fb != secondary_fb) {
9855            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9856                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9857                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
9858                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
9859                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9860        }
9861        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
9862        if (fb_data == dev_data->frameBufferMap.end()) {
9863            skip_call |=
9864                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9865                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9866                                                                          "which has invalid framebuffer %" PRIx64 ".",
9867                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9868            return skip_call;
9869        }
9870        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
9871                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9872    }
9873    return skip_call;
9874}
9875
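// Check query state for executing a secondary command buffer: inherited pipeline
// statistics must be a subset of those enabled on any active pipeline-statistics query
// pool, and the secondary buffer must not have started a query of a type that is
// already active in the primary.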
9876bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9877    bool skipCall = false;
9878    unordered_set<int> activeTypes;
9879    for (auto queryObject : pCB->activeQueries) {
9880        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9881        if (queryPoolData != dev_data->queryPoolMap.end()) {
9882            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9883                pSubCB->beginInfo.pInheritanceInfo) {
9884                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9885                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9886                    skipCall |= log_msg(
9887                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9888                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9889                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics are being queried, so the command "
                        "buffer's inherited pipelineStatistics must be a subset of the bits enabled on the query pool.",
9892                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9893                }
9894            }
9895            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9896        }
9897    }
9898    for (auto queryObject : pSubCB->startedQueries) {
9899        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9900        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9901            skipCall |=
9902                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9903                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9904                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 " of type %d but a query of that type has been started on "
9906                        "secondary Cmd Buffer %p.",
9907                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9908                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9909        }
9910    }
9911    return skipCall;
9912}
9913
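// Validate each element of pCommandBuffers: it must be a known secondary command
// buffer whose render pass, framebuffer, query, and simultaneous-use state are
// compatible with the primary; only then is the call dispatched down the chain.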
9914VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9915vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9916    VkBool32 skipCall = VK_FALSE;
9917    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9918    loader_platform_thread_lock_mutex(&globalLock);
9919    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9920    if (pCB) {
9921        GLOBAL_CB_NODE *pSubCB = NULL;
9922        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9923            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9924            if (!pSubCB) {
9925                skipCall |=
9926                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9927                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9928                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
9929                            (void *)pCommandBuffers[i], i);
9930            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9931                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9932                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9933                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
9934                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9935                                    (void *)pCommandBuffers[i], i);
9936            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9937                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9938                    skipCall |= log_msg(
9939                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9940                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9941                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
9942                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9943                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
9944                } else {
                    // Make sure render pass is compatible with parent command buffer pass if it has the RENDER_PASS_CONTINUE bit set
9946                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
9947                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9948                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9949                }
9950                string errorString = "";
9951                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
9952                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9953                    skipCall |= log_msg(
9954                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9955                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9956                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
9957                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
9958                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9959                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
9960                }
9961                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
9962                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
9963                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9964                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9965                        skipCall |= log_msg(
9966                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9967                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9968                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
9969                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
9970                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9971                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
9972                    }
9973                }
9974            }
9975            // TODO(mlentine): Move more logic into this method
9976            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9977            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution starting from the
            // time they are recorded
9980            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9981                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9982                    skipCall |= log_msg(
9983                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9984                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9985                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9986                        "set!",
9987                        (uint64_t)(pCB->commandBuffer));
9988                }
9989                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that a secondary cmd buffer without SIMULTANEOUS_USE_BIT forces the primary to be treated as if it lacks it as well
9991                    skipCall |= log_msg(
9992                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9993                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9994                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
9995                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9996                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9997                                          "set, even though it does.",
9998                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9999                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10000                }
10001            }
10002            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
10003                skipCall |=
10004                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10005                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10006                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10007                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
10008                            "flight and inherited queries not "
10009                            "supported on this device.",
10010                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10011            }
10012            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10013            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10014            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10015        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10018    }
10019    loader_platform_thread_unlock_mutex(&globalLock);
10020    if (VK_FALSE == skipCall)
10021        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10022}
10023
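// Memory bound to an image may only be mapped while every tracked subresource layout
// is GENERAL or PREINITIALIZED.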
10024VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
10025    VkBool32 skip_call = VK_FALSE;
10026    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10027    auto mem_data = dev_data->memObjMap.find(mem);
10028    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
10029        std::vector<VkImageLayout> layouts;
10030        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
10031            for (auto layout : layouts) {
10032                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10033                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10034                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10035                                                                                         "GENERAL or PREINITIALIZED are supported.",
10036                                         string_VkImageLayout(layout));
10037                }
10038            }
10039        }
10040    }
10041    return skip_call;
10042}
10043
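// Mark the allocation valid, require a host-visible memory type, validate and record
// the mapped range, and check bound-image layouts before dispatching down the chain.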
10044VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10045vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10046    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10047
10048    VkBool32 skip_call = VK_FALSE;
10049    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10050    loader_platform_thread_lock_mutex(&globalLock);
10051#if MTMERGE
10052    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
10053    if (pMemObj) {
10054        pMemObj->valid = true;
10055        if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10056            skip_call =
10057                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10058                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10059                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
10060        }
10061    }
10062    skip_call |= validateMemRange(dev_data, mem, offset, size);
10063    storeMemRanges(dev_data, mem, offset, size);
10064#endif
10065    skip_call |= ValidateMapImageLayouts(device, mem);
10066    loader_platform_thread_unlock_mutex(&globalLock);
10067
10068    if (VK_FALSE == skip_call) {
10069        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
10070#if MTMERGE
10071        initializeAndTrackMemory(dev_data, mem, size, ppData);
10072#endif
10073    }
10074    return result;
10075}
10076
10077#if MTMERGE
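// Drop the recorded mapped range before dispatching the unmap down the chain.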
10078VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
10079    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10080    VkBool32 skipCall = VK_FALSE;
10081
10082    loader_platform_thread_lock_mutex(&globalLock);
10083    skipCall |= deleteMemRanges(my_data, mem);
10084    loader_platform_thread_unlock_mutex(&globalLock);
10085    if (VK_FALSE == skipCall) {
10086        my_data->device_dispatch_table->UnmapMemory(device, mem);
10087    }
10088}
10089
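// Each VkMappedMemoryRange passed to a flush/invalidate call must lie within the
// range recorded for that memory object at vkMapMemory() time.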
10090VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10091                                const VkMappedMemoryRange *pMemRanges) {
10092    VkBool32 skipCall = VK_FALSE;
10093    for (uint32_t i = 0; i < memRangeCount; ++i) {
10094        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10095        if (mem_element != my_data->memObjMap.end()) {
10096            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
10097                skipCall |= log_msg(
10098                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10099                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10100                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10101                    "(" PRINTF_SIZE_T_SPECIFIER ").",
10102                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
10103            }
10104            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
10105                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
10106                 (pMemRanges[i].offset + pMemRanges[i].size))) {
10107                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10108                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10109                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10110                                                                 ") exceeds the Memory Object's upper-bound "
10111                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
10112                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10113                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
10114            }
10115        }
10116    }
10117    return skipCall;
10118}
10119
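// Non-coherent mappings are shadowed: the tracked allocation carries guard bands of
// half the mapped size on each side of the user region, filled at map time with
// NoncoherentMemoryFillValue (see initializeAndTrackMemory). Flag any overwritten
// guard bytes as an overflow, then copy the user region down to the driver's mapping.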
10120VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10121                                                  const VkMappedMemoryRange *pMemRanges) {
10122    VkBool32 skipCall = VK_FALSE;
10123    for (uint32_t i = 0; i < memRangeCount; ++i) {
10124        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10125        if (mem_element != my_data->memObjMap.end()) {
10126            if (mem_element->second.pData) {
10127                VkDeviceSize size = mem_element->second.memRange.size;
10128                VkDeviceSize half_size = (size / 2);
10129                char *data = static_cast<char *>(mem_element->second.pData);
                for (VkDeviceSize j = 0; j < half_size; ++j) {
10131                    if (data[j] != NoncoherentMemoryFillValue) {
10132                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10133                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10134                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10135                                            (uint64_t)pMemRanges[i].memory);
10136                    }
10137                }
10138                for (auto j = size + half_size; j < 2 * size; ++j) {
10139                    if (data[j] != NoncoherentMemoryFillValue) {
10140                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10141                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10142                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10143                                            (uint64_t)pMemRanges[i].memory);
10144                    }
10145                }
10146                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
10147            }
10148        }
10149    }
10150    return skipCall;
10151}
10152
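// Copy shadowed non-coherent data to the driver and verify the ranges are mapped
// before dispatching down the chain.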
10153VK_LAYER_EXPORT VkResult VKAPI_CALL
10154vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10155    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10156    VkBool32 skipCall = VK_FALSE;
10157    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10158
10159    loader_platform_thread_lock_mutex(&globalLock);
10160    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
10161    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10162    loader_platform_thread_unlock_mutex(&globalLock);
10163    if (VK_FALSE == skipCall) {
10164        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10165    }
10166    return result;
10167}
10168
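// Verify the ranges are mapped before dispatching down the chain.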
10169VK_LAYER_EXPORT VkResult VKAPI_CALL
10170vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10171    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10172    VkBool32 skipCall = VK_FALSE;
10173    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10174
10175    loader_platform_thread_lock_mutex(&globalLock);
10176    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10177    loader_platform_thread_unlock_mutex(&globalLock);
10178    if (VK_FALSE == skipCall) {
10179        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10180    }
10181    return result;
10182}
10183#endif
10184
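// Record the image-to-memory binding and check for buffer/image aliasing, then cache
// the binding's memory, offset, and size after dispatching down the chain.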
10185VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10186    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    // Declared outside the MTMERGE block because it is tested below even when MTMERGE is off
    VkBool32 skipCall = VK_FALSE;
#if MTMERGE
    loader_platform_thread_lock_mutex(&globalLock);
    // Track objects tied to memory
    uint64_t image_handle = (uint64_t)(image);
    skipCall = set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10194    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
10195    {
10196        VkMemoryRequirements memRequirements;
10197        vkGetImageMemoryRequirements(device, image, &memRequirements);
10198        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
10199                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
10200                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
10201    }
10202    print_mem_list(dev_data, device);
10203    loader_platform_thread_unlock_mutex(&globalLock);
10204#endif
10205    if (VK_FALSE == skipCall) {
10206        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
10207        VkMemoryRequirements memRequirements;
10208        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10209        loader_platform_thread_lock_mutex(&globalLock);
10210        dev_data->memObjMap[mem].image = image;
10211        dev_data->imageMap[image].mem = mem;
10212        dev_data->imageMap[image].memOffset = memoryOffset;
10213        dev_data->imageMap[image].memSize = memRequirements.size;
10214        loader_platform_thread_unlock_mutex(&globalLock);
10215    }
10216    return result;
10217}
10218
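// Mark the event as signaled from the host before dispatching down the chain.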
10219VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
10220    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10221    loader_platform_thread_lock_mutex(&globalLock);
10222    dev_data->eventMap[event].needsSignaled = false;
10223    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10224    loader_platform_thread_unlock_mutex(&globalLock);
10225    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
10226    return result;
10227}
10228
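// Record sparse buffer/image memory bindings, validate wait/signal semaphore state,
// and update tracked semaphore state around the downstream call.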
10229VKAPI_ATTR VkResult VKAPI_CALL
10230vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10231    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10232    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10233    VkBool32 skip_call = VK_FALSE;
10234#if MTMERGE
10235    //MTMTODO : Merge this code with the checks below
10236    loader_platform_thread_lock_mutex(&globalLock);
10237
10238    for (uint32_t i = 0; i < bindInfoCount; i++) {
10239        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
10240        // Track objects tied to memory
10241        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
10242            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
10243                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
10244                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10245                                           "vkQueueBindSparse"))
10246                    skip_call = VK_TRUE;
10247            }
10248        }
10249        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
10250            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
10251                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
10252                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10253                                           "vkQueueBindSparse"))
10254                    skip_call = VK_TRUE;
10255            }
10256        }
10257        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
10258            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
10259                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
10260                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10261                                           "vkQueueBindSparse"))
10262                    skip_call = VK_TRUE;
10263            }
10264        }
10265        // Validate semaphore state
        for (uint32_t w = 0; w < bindInfo->waitSemaphoreCount; w++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[w];
10268
10269            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10270                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
10271                    skip_call =
10272                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10273                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10274                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
10275                }
10276                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
10277            }
10278        }
        for (uint32_t s = 0; s < bindInfo->signalSemaphoreCount; s++) {
            VkSemaphore sem = bindInfo->pSignalSemaphores[s];
10281
10282            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10283                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10284                    skip_call =
10285                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10286                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10287                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
10288                }
10289                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10290            }
10291        }
10292    }
10293
10294    print_mem_list(dev_data, queue);
10295    loader_platform_thread_unlock_mutex(&globalLock);
10296#endif
10297    loader_platform_thread_lock_mutex(&globalLock);
10298    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10299        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10300        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10301            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
10302                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
10303            } else {
10304                skip_call |=
10305                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10306                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10307                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10308                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
10309            }
10310        }
10311        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10312            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
10313        }
10314    }
10315    loader_platform_thread_unlock_mutex(&globalLock);
10316
    // Dispatch down the chain; result is returned after the semaphore state update below
    if (VK_FALSE == skip_call)
        result = dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10319#if MTMERGE
10320    // Update semaphore state
10321    loader_platform_thread_lock_mutex(&globalLock);
10322    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
10323        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
10324        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
10325            VkSemaphore sem = bindInfo->pWaitSemaphores[i];
10326
10327            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10328                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10329            }
10330        }
10331    }
10332    loader_platform_thread_unlock_mutex(&globalLock);
10333#endif
10334
10335    return result;
10336}
10337
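// On success, initialize tracking state for the new semaphore: unsignaled, no owning
// queue, and not in use.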
10338VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10339                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10340    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10341    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10342    if (result == VK_SUCCESS) {
10343        loader_platform_thread_lock_mutex(&globalLock);
10344        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10345        sNode->signaled = 0;
10346        sNode->queue = VK_NULL_HANDLE;
10347        sNode->in_use.store(0);
10348        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10349        loader_platform_thread_unlock_mutex(&globalLock);
10350    }
10351    return result;
10352}
10353
10354VKAPI_ATTR VkResult VKAPI_CALL
10355vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10356    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10357    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10358    if (result == VK_SUCCESS) {
10359        loader_platform_thread_lock_mutex(&globalLock);
10360        dev_data->eventMap[*pEvent].needsSignaled = false;
10361        dev_data->eventMap[*pEvent].in_use.store(0);
10362        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10363        loader_platform_thread_unlock_mutex(&globalLock);
10364    }
10365    return result;
10366}
10367
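// On success, allocate a SWAPCHAIN_NODE to track the new swapchain and its images.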
10368VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10369                                                                    const VkAllocationCallbacks *pAllocator,
10370                                                                    VkSwapchainKHR *pSwapchain) {
10371    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10372    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10373
10374    if (VK_SUCCESS == result) {
10375        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
10376        loader_platform_thread_lock_mutex(&globalLock);
10377        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
10378        loader_platform_thread_unlock_mutex(&globalLock);
10379    }
10380
10381    return result;
10382}
10383
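// Purge layout and binding tracking for each swapchain image, then delete the
// swapchain node before dispatching down the chain.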
10384VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10385vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10386    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10387    bool skipCall = false;
10388
10389    loader_platform_thread_lock_mutex(&globalLock);
10390    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
10391    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
10392        if (swapchain_data->second->images.size() > 0) {
10393            for (auto swapchain_image : swapchain_data->second->images) {
10394                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10395                if (image_sub != dev_data->imageSubresourceMap.end()) {
10396                    for (auto imgsubpair : image_sub->second) {
10397                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10398                        if (image_item != dev_data->imageLayoutMap.end()) {
10399                            dev_data->imageLayoutMap.erase(image_item);
10400                        }
10401                    }
10402                    dev_data->imageSubresourceMap.erase(image_sub);
10403                }
                skipCall |= clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10406                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
10407            }
10408        }
10409        delete swapchain_data->second;
10410        dev_data->device_extensions.swapchainMap.erase(swapchain);
10411    }
10412    loader_platform_thread_unlock_mutex(&globalLock);
10413    if (!skipCall)
10414        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10415}
10416
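// Record the returned swapchain images, seed their layout tracking as UNDEFINED, and
// warn if a repeated query returns data that does not match the first.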
10417VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10418vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10419    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10420    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10421
10422    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10423        // This should never happen and is checked by param checker.
10424        if (!pCount)
10425            return result;
10426        loader_platform_thread_lock_mutex(&globalLock);
10427        const size_t count = *pCount;
10428        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
10429        if (!swapchain_node->images.empty()) {
10430            // TODO : Not sure I like the memcmp here, but it works
10431            const bool mismatch = (swapchain_node->images.size() != count ||
10432                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10433            if (mismatch) {
10434                // TODO: Verify against Valid Usage section of extension
10435                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10436                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(swapchain %" PRIu64
                        ") returned mismatching data on a repeated query",
10439                        (uint64_t)(swapchain));
10440            }
10441        }
10442        for (uint32_t i = 0; i < *pCount; ++i) {
10443            IMAGE_LAYOUT_NODE image_layout_node;
10444            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10445            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10446            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
10447            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10448            swapchain_node->images.push_back(pSwapchainImages[i]);
10449            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10450            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10451            dev_data->imageLayoutMap[subpair] = image_layout_node;
10452            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10453        }
10454        if (!swapchain_node->images.empty()) {
10455            for (auto image : swapchain_node->images) {
10456                // Add image object binding, then insert the new Mem Object and then bind it to created image
10457                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10458                                       &swapchain_node->createInfo);
10459            }
10460        }
10461        loader_platform_thread_unlock_mutex(&globalLock);
10462    }
10463    return result;
10464}
10465
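// Each wait semaphore must be signaled, each presented image must be backed by valid
// memory, and each image layout must be PRESENT_SRC_KHR before presenting.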
10466VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10467    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10468    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10469    bool skip_call = false;
10470
10471    if (pPresentInfo) {
10472        loader_platform_thread_lock_mutex(&globalLock);
10473        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10474            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
10475                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
10476            } else {
10477                skip_call |=
10478                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10479                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10480                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10481                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
10482            }
10483        }
10484        VkDeviceMemory mem;
10485        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10486            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
10487            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
10488                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
10489                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
10490                skip_call |=
10491                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10492                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10493                vector<VkImageLayout> layouts;
10494                if (FindLayouts(dev_data, image, layouts)) {
10495                    for (auto layout : layouts) {
10496                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10497                            skip_call |=
10498                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10499                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but are in %s",
10502                                        string_VkImageLayout(layout));
10503                        }
10504                    }
10505                }
10506            }
10507        }
10508        loader_platform_thread_unlock_mutex(&globalLock);
10509    }
10510
10511    if (!skip_call)
10512        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10513#if MTMERGE
10514    loader_platform_thread_lock_mutex(&globalLock);
    // pPresentInfo may be NULL, mirroring the check at the top of this function
    if (pPresentInfo) {
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
    }
10521    loader_platform_thread_unlock_mutex(&globalLock);
10522#endif
10523    return result;
10524}
10525
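// Track semaphore and fence state for the acquire; the semaphore is marked signaled
// after the downstream call.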
10526VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10527                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10528    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10529    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10530    bool skipCall = false;
10531#if MTMERGE
10532    loader_platform_thread_lock_mutex(&globalLock);
10533    if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10534        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10535            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10536                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10537                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10538        }
10539        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10540    }
10541    auto fence_data = dev_data->fenceMap.find(fence);
10542    if (fence_data != dev_data->fenceMap.end()) {
10543        fence_data->second.swapchain = swapchain;
10544    }
10545    loader_platform_thread_unlock_mutex(&globalLock);
10546#endif
10547    if (!skipCall) {
10548        result =
10549            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10550    }
10551    loader_platform_thread_lock_mutex(&globalLock);
    // FIXME/TODO: Need to add tracking code for the "fence" parameter
10553    dev_data->semaphoreMap[semaphore].signaled = 1;
10554    loader_platform_thread_unlock_mutex(&globalLock);
10555    return result;
10556}
10557
10558VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10559vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10560                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10561    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10562    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10563    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10564    if (VK_SUCCESS == res) {
10565        loader_platform_thread_lock_mutex(&globalLock);
10566        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
10567        loader_platform_thread_unlock_mutex(&globalLock);
10568    }
10569    return res;
10570}
10571
10572VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
10573                                                                           VkDebugReportCallbackEXT msgCallback,
10574                                                                           const VkAllocationCallbacks *pAllocator) {
10575    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10576    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10577    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10578    loader_platform_thread_lock_mutex(&globalLock);
10579    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10580    loader_platform_thread_unlock_mutex(&globalLock);
10581}
10582
10583VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10584vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10585                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10586    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10587    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10588                                                            pMsg);
10589}
10590
10591VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10592    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10593        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10594    if (!strcmp(funcName, "vkDestroyDevice"))
10595        return (PFN_vkVoidFunction)vkDestroyDevice;
10596    if (!strcmp(funcName, "vkQueueSubmit"))
10597        return (PFN_vkVoidFunction)vkQueueSubmit;
10598    if (!strcmp(funcName, "vkWaitForFences"))
10599        return (PFN_vkVoidFunction)vkWaitForFences;
10600    if (!strcmp(funcName, "vkGetFenceStatus"))
10601        return (PFN_vkVoidFunction)vkGetFenceStatus;
10602    if (!strcmp(funcName, "vkQueueWaitIdle"))
10603        return (PFN_vkVoidFunction)vkQueueWaitIdle;
10604    if (!strcmp(funcName, "vkDeviceWaitIdle"))
10605        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
10606    if (!strcmp(funcName, "vkGetDeviceQueue"))
10607        return (PFN_vkVoidFunction)vkGetDeviceQueue;
10608    if (!strcmp(funcName, "vkDestroyInstance"))
10609        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkDestroyFence"))
        return (PFN_vkVoidFunction)vkDestroyFence;
    if (!strcmp(funcName, "vkResetFences"))
        return (PFN_vkVoidFunction)vkResetFences;
    if (!strcmp(funcName, "vkDestroySemaphore"))
        return (PFN_vkVoidFunction)vkDestroySemaphore;
    if (!strcmp(funcName, "vkDestroyEvent"))
        return (PFN_vkVoidFunction)vkDestroyEvent;
    if (!strcmp(funcName, "vkDestroyQueryPool"))
        return (PFN_vkVoidFunction)vkDestroyQueryPool;
    if (!strcmp(funcName, "vkDestroyBuffer"))
        return (PFN_vkVoidFunction)vkDestroyBuffer;
    if (!strcmp(funcName, "vkDestroyBufferView"))
        return (PFN_vkVoidFunction)vkDestroyBufferView;
    if (!strcmp(funcName, "vkDestroyImage"))
        return (PFN_vkVoidFunction)vkDestroyImage;
    if (!strcmp(funcName, "vkDestroyImageView"))
        return (PFN_vkVoidFunction)vkDestroyImageView;
    if (!strcmp(funcName, "vkDestroyShaderModule"))
        return (PFN_vkVoidFunction)vkDestroyShaderModule;
    if (!strcmp(funcName, "vkDestroyPipeline"))
        return (PFN_vkVoidFunction)vkDestroyPipeline;
    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
    if (!strcmp(funcName, "vkDestroySampler"))
        return (PFN_vkVoidFunction)vkDestroySampler;
    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
    if (!strcmp(funcName, "vkDestroyFramebuffer"))
        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
    if (!strcmp(funcName, "vkDestroyRenderPass"))
        return (PFN_vkVoidFunction)vkDestroyRenderPass;
    if (!strcmp(funcName, "vkCreateBuffer"))
        return (PFN_vkVoidFunction)vkCreateBuffer;
    if (!strcmp(funcName, "vkCreateBufferView"))
        return (PFN_vkVoidFunction)vkCreateBufferView;
    if (!strcmp(funcName, "vkCreateImage"))
        return (PFN_vkVoidFunction)vkCreateImage;
    if (!strcmp(funcName, "vkCreateImageView"))
        return (PFN_vkVoidFunction)vkCreateImageView;
    if (!strcmp(funcName, "vkCreateFence"))
        return (PFN_vkVoidFunction)vkCreateFence;
    if (!strcmp(funcName, "vkCreatePipelineCache"))
        return (PFN_vkVoidFunction)vkCreatePipelineCache;
    if (!strcmp(funcName, "vkDestroyPipelineCache"))
        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
    if (!strcmp(funcName, "vkGetPipelineCacheData"))
        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
    if (!strcmp(funcName, "vkMergePipelineCaches"))
        return (PFN_vkVoidFunction)vkMergePipelineCaches;
    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
    if (!strcmp(funcName, "vkCreateComputePipelines"))
        return (PFN_vkVoidFunction)vkCreateComputePipelines;
    if (!strcmp(funcName, "vkCreateSampler"))
        return (PFN_vkVoidFunction)vkCreateSampler;
    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
    if (!strcmp(funcName, "vkCreatePipelineLayout"))
        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
    if (!strcmp(funcName, "vkCreateDescriptorPool"))
        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
    if (!strcmp(funcName, "vkResetDescriptorPool"))
        return (PFN_vkVoidFunction)vkResetDescriptorPool;
    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
    if (!strcmp(funcName, "vkFreeDescriptorSets"))
        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
    if (!strcmp(funcName, "vkCreateCommandPool"))
        return (PFN_vkVoidFunction)vkCreateCommandPool;
    if (!strcmp(funcName, "vkDestroyCommandPool"))
        return (PFN_vkVoidFunction)vkDestroyCommandPool;
    if (!strcmp(funcName, "vkResetCommandPool"))
        return (PFN_vkVoidFunction)vkResetCommandPool;
    if (!strcmp(funcName, "vkCreateQueryPool"))
        return (PFN_vkVoidFunction)vkCreateQueryPool;
    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
    if (!strcmp(funcName, "vkFreeCommandBuffers"))
        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
    if (!strcmp(funcName, "vkBeginCommandBuffer"))
        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
    if (!strcmp(funcName, "vkEndCommandBuffer"))
        return (PFN_vkVoidFunction)vkEndCommandBuffer;
    if (!strcmp(funcName, "vkResetCommandBuffer"))
        return (PFN_vkVoidFunction)vkResetCommandBuffer;
    if (!strcmp(funcName, "vkCmdBindPipeline"))
        return (PFN_vkVoidFunction)vkCmdBindPipeline;
    if (!strcmp(funcName, "vkCmdSetViewport"))
        return (PFN_vkVoidFunction)vkCmdSetViewport;
    if (!strcmp(funcName, "vkCmdSetScissor"))
        return (PFN_vkVoidFunction)vkCmdSetScissor;
    if (!strcmp(funcName, "vkCmdSetLineWidth"))
        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
    if (!strcmp(funcName, "vkCmdSetDepthBias"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
    if (!strcmp(funcName, "vkCmdSetStencilReference"))
        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
    if (!strcmp(funcName, "vkCmdDraw"))
        return (PFN_vkVoidFunction)vkCmdDraw;
    if (!strcmp(funcName, "vkCmdDrawIndexed"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
    if (!strcmp(funcName, "vkCmdDrawIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
    if (!strcmp(funcName, "vkCmdDispatch"))
        return (PFN_vkVoidFunction)vkCmdDispatch;
    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
    if (!strcmp(funcName, "vkCmdCopyBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
    if (!strcmp(funcName, "vkCmdCopyImage"))
        return (PFN_vkVoidFunction)vkCmdCopyImage;
    if (!strcmp(funcName, "vkCmdBlitImage"))
        return (PFN_vkVoidFunction)vkCmdBlitImage;
    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
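    // These memory-related hooks are compiled in only when MTMERGE is enabled.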
#if MTMERGE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;

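    // Everything below needs a valid device: the WSI check and the chain-down call both dispatch on it.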
    if (dev == NULL)
        return NULL;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

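    // Swapchain entry points are exposed only if this device enabled the WSI swapchain extension.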
    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

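    // Not a name this layer intercepts; pass the query down to the next layer's GetDeviceProcAddr.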
    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

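// Return the layer's own hook for any instance-level entry point this layer intercepts;
// unrecognized names are offered to the debug-report helpers and then to the next layer.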
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
#if MTMERGE
    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
#endif
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;

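    // Everything below needs a valid instance to dispatch on.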
    if (instance == NULL)
        return NULL;

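    // Give the debug-report helpers first crack at resolving their own extension entry points.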
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    PFN_vkVoidFunction fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

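    // Unrecognized name; forward the query to the next layer's GetInstanceProcAddr.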
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}