core_validation.cpp revision 51db9745081fb7d5eaf6d00cfe5dcc1f82c1730c
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// Forward declarations
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    uint64_t currentFenceId;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          currentFenceId(1), device(VK_NULL_HANDLE), phys_dev_properties{},
          phys_dev_mem_props{} {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}
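// Illustrative enabled-layer orderings (closest to the application first); the
// second list triggers the console warning above, because unique_objects
// precedes core_validation:
//   { "VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects" }  // OK
//   { "VK_LAYER_GOOGLE_unique_objects", "VK_LAYER_LUNARG_core_validation" }  // warns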

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
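// Illustrative decode: SPIR-V packs each instruction's word count into the high
// 16 bits of its first word and the opcode into the low 16 bits, so for the
// word 0x0004003B, len() == 4 and opcode() == 0x3B (spv::OpVariable).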

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn, past the 5-word header */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageMap.find(VkImage(handle));
        if (it != my_data->imageMap.end())
            return &(*it).second.mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferMap.find(VkBuffer(handle));
        if (it != my_data->bufferMap.end())
            return &(*it).second.mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
// Add a fence, creating one if necessary to our list of fences/fenceIds
static bool add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    bool skipCall = false;
    *fenceId = my_data->currentFenceId++;

    // If the user provided a fence, track it and validate that it is in the UNSIGNALED state
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}
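// Illustrative timeline of the fenceId watermark scheme above, assuming a
// single queue: each submission takes a monotonically increasing fenceId, and
// a fence is considered complete once the queue's lastRetiredId reaches it.
//
//   submit with fence A  -> fenceId 1, lastSubmittedId = 1
//   submit with fence B  -> fenceId 2, lastSubmittedId = 2
//   fence A signals      -> lastRetiredId = 1   (B still in flight)
//   vkQueueWaitIdle      -> lastRetiredId = 2   (all fences retired)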

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}
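// Illustrative semantics, assuming VkBufferUsageFlagBits values: with
// actual = TRANSFER_SRC | VERTEX_BUFFER and desired = TRANSFER_SRC | INDEX_BUFFER,
// the non-strict check passes (the flag sets intersect) while the strict check
// fails (INDEX_BUFFER is missing from actual).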

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        skipCall = validate_usage_flags(dev_data, image_node->second.createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const buffer_node = dev_data->bufferMap.find(buffer);
    if (buffer_node != dev_data->bufferMap.end()) {
        skipCall = validate_usage_flags(dev_data, buffer_node->second.createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    // Take a single reference into the map rather than re-hashing the key for every member
    DEVICE_MEM_INFO &mem_info = my_data->memObjMap[mem];
    memcpy(&mem_info.allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    mem_info.allocInfo.pNext = NULL;
    mem_info.object = object;
    mem_info.mem = mem;
    mem_info.image = VK_NULL_HANDLE;
    mem_info.memRange.offset = 0;
    mem_info.memRange.size = 0;
    mem_info.pData = 0;
    mem_info.pDriverData = 0;
    mem_info.valid = false;
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end() && !image_node->second.valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end()) {
            image_node->second.valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if (cmdBufRefCount != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
    bool skipCall = false;
    *complete = true;

    if (pCBNode) {
        if (pCBNode->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBNode->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                bool commandBufferComplete = false;
                skipCall |= checkCBCompleted(dev_data, cb, &commandBufferComplete);
                if (commandBufferComplete) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to VK_NULL_HANDLE
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, *pMemBinding);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    pMemInfo->objBindings.insert({handle, type});
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        auto const image_node = dev_data->imageMap.find(VkImage(handle));
                        if (image_node != dev_data->imageMap.end()) {
                            VkImageCreateInfo ici = image_node->second.createInfo;
                            if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                                // TODO::  More memory state transition stuff.
                            }
                        }
                    }
                    *pMemBinding = mem;
                }
            }
        }
    }
    return skipCall;
}
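// Illustrative call site (hypothetical handles): a vkBindBufferMemory entry
// point would route through something like
//   set_mem_binding(dev_data, mem, (uint64_t)buffer,
//                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");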

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if an error was logged (and the call should be skipped), false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list", apiName, handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
            if (pInfo) {
                pInfo->objBindings.insert({handle, type});
                // Need to set mem binding for this object
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->objBindings.size());
        if (pInfo->objBindings.size() > 0) {
            for (auto obj : pInfo->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->commandBufferBindings.size());
        if (pInfo->commandBufferBindings.size() > 0) {
            for (auto cb : pInfo->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)obj);
        }
    }
}

#endif

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
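// Note on the word positions used above, per the SPIR-V spec: OpType*
// instructions carry their result <id> in word 1, while constants, variables,
// and functions carry a result-type <id> in word 1 and their result <id> in
// word 2 -- hence the two different def_index expressions.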

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
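// The `1u << insn.word(1)` above relies on VkShaderStageFlagBits mirroring
// spv::ExecutionModel for these stages (e.g. ExecutionModelVertex == 0 and
// VK_SHADER_STAGE_VERTEX_BIT == 1u << 0), so shifting by the execution model
// yields the corresponding Vulkan stage bit.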

bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t *words = (uint32_t *)pCreateInfo->pCode;
    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);

    /* Just validate that the header makes sense. */
    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
}
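// The 5-word header being sanity-checked above: word 0 is the magic number
// 0x07230203, word 1 the version, word 2 the generator magic, word 3 the <id>
// bound, and word 4 the (reserved) schema -- shader_module::begin() skips
// exactly these five words.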

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
            considering here, OR -- specialize on the fly now.
            */
        return 1;
    }

    return value.word(3);
}
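// For a 32-bit OpConstant the operands are: word 1 = result-type <id>,
// word 2 = result <id>, word 3 = the literal value -- hence word(3) above.
// Wider constants carry extra literal words that this helper ignores.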

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
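// Illustrative output, assuming a GLSL input declared as
// `layout(location = 0) in vec4 pos;`: describe_type() on the variable's
// pointer type yields "ptr to input vec4 of float32".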

static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}

static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
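// Illustrative relaxed match, assuming `a` is the producer type and `b` the
// consumer type (as in cross-stage interface validation): a vertex-shader
// output of vec4 matches a fragment-shader input of vec3 when relaxed is true,
// since both are narrow (<64-bit) float vectors and the producer supplies at
// least as many components as the consumer reads.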
1230
1231static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1232    auto it = map.find(id);
1233    if (it == map.end())
1234        return def;
1235    else
1236        return it->second;
1237}
1238
1239static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1240    auto insn = src->get_def(type);
1241    assert(insn != src->end());
1242
1243    switch (insn.opcode()) {
1244    case spv::OpTypePointer:
1245        /* see through the ptr -- this is only ever at the top level for graphics shaders;
1246         * we're never actually passing pointers around. */
1247        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1248    case spv::OpTypeArray:
1249        if (strip_array_level) {
1250            return get_locations_consumed_by_type(src, insn.word(2), false);
1251        } else {
1252            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1253        }
1254    case spv::OpTypeMatrix:
1255        /* num locations is the dimension * element size */
1256        /* number of locations is the column count times the locations consumed by the column type */
1257    default:
1258        /* everything else is just 1. */
1259        return 1;
1260
1261        /* TODO: extend to handle 64bit scalar types, whose vectors may need
1262         * multiple locations. */
1263    }
1264}
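/* Examples (illustrative, assuming typical glslang output):
 *   float f;     -> 1 location (default case)
 *   vec4  v;     -> 1 location (vectors still consume a single location here)
 *   mat3  m;     -> 3 locations (column count * locations per column)
 *   vec2  a[4];  -> 4 locations (array size * element locations), or 1 when
 *                   strip_array_level is set, as for per-vertex arrays. */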
1265
1266typedef std::pair<unsigned, unsigned> location_t;
1267typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1268
1269struct interface_var {
1270    uint32_t id;
1271    uint32_t type_id;
1272    uint32_t offset;
1273    bool is_patch;
1274    /* TODO: collect the name, too? Isn't required to be present. */
1275};
1276
1277struct shader_stage_attributes {
1278    char const *const name;
1279    bool arrayed_input;
1280    bool arrayed_output;
1281};
1282
1283static shader_stage_attributes shader_stage_attribs[] = {
1284    {"vertex shader", false, false},
1285    {"tessellation control shader", true, true},
1286    {"tessellation evaluation shader", true, false},
1287    {"geometry shader", true, false},
1288    {"fragment shader", false, false},
1289};
1290
1291static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1292    while (true) {
1293
1294        if (def.opcode() == spv::OpTypePointer) {
1295            def = src->get_def(def.word(3));
1296        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1297            def = src->get_def(def.word(2));
1298            is_array_of_verts = false;
1299        } else if (def.opcode() == spv::OpTypeStruct) {
1300            return def;
1301        } else {
1302            return src->end();
1303        }
1304    }
1305}
1306
1307static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
1308                                            std::map<location_t, interface_var> &out,
1309                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1310                                            uint32_t id, uint32_t type_id, bool is_patch) {
1311    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1312    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1313    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1314        /* this isn't an interface block. */
1315        return;
1316    }
1317
1318    std::unordered_map<unsigned, unsigned> member_components;
1319
1320    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1321    for (auto insn : *src) {
1322        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1323            unsigned member_index = insn.word(2);
1324
1325            if (insn.word(3) == spv::DecorationComponent) {
1326                unsigned component = insn.word(4);
1327                member_components[member_index] = component;
1328            }
1329        }
1330    }
1331
1332    /* Second pass -- produce the output, from Location decorations */
1333    for (auto insn : *src) {
1334        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1335            unsigned member_index = insn.word(2);
1336            unsigned member_type_id = type.word(2 + member_index);
1337
1338            if (insn.word(3) == spv::DecorationLocation) {
1339                unsigned location = insn.word(4);
1340                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1341                auto component_it = member_components.find(member_index);
1342                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1343
1344                for (unsigned int offset = 0; offset < num_locations; offset++) {
1345                    interface_var v;
1346                    v.id = id;
1347                    /* TODO: member index in interface_var too? */
1348                    v.type_id = member_type_id;
1349                    v.offset = offset;
1350                    v.is_patch = is_patch;
1351                    out[std::make_pair(location + offset, component)] = v;
1352                }
1353            }
1354        }
1355    }
1356}
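/* Example (illustrative, assuming typical glslang output): for a block like
 *   layout(location = 4) out Block { vec4 color; float fog; } blk;
 * the members are typically decorated Location 4 and Location 5, so the
 * second pass above emits interface_vars keyed at (4,0) and (5,0). */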
1357
1358static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
1359                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1360                                          bool is_array_of_verts) {
1361    std::unordered_map<unsigned, unsigned> var_locations;
1362    std::unordered_map<unsigned, unsigned> var_builtins;
1363    std::unordered_map<unsigned, unsigned> var_components;
1364    std::unordered_map<unsigned, unsigned> blocks;
1365    std::unordered_map<unsigned, unsigned> var_patch;
1366
1367    for (auto insn : *src) {
1368
1369        /* We consider two interface models: SSO rendezvous-by-location, and
1370         * builtins. Complain about anything that fits neither model.
1371         */
1372        if (insn.opcode() == spv::OpDecorate) {
1373            if (insn.word(2) == spv::DecorationLocation) {
1374                var_locations[insn.word(1)] = insn.word(3);
1375            }
1376
1377            if (insn.word(2) == spv::DecorationBuiltIn) {
1378                var_builtins[insn.word(1)] = insn.word(3);
1379            }
1380
1381            if (insn.word(2) == spv::DecorationComponent) {
1382                var_components[insn.word(1)] = insn.word(3);
1383            }
1384
1385            if (insn.word(2) == spv::DecorationBlock) {
1386                blocks[insn.word(1)] = 1;
1387            }
1388
1389            if (insn.word(2) == spv::DecorationPatch) {
1390                var_patch[insn.word(1)] = 1;
1391            }
1392        }
1393    }
1394
1395    /* TODO: handle grouped decorations */
1396    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1397     * have the same location, and we DON'T want to clobber. */
1398
1399    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1400       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1401       the word to determine which word contains the terminator. */
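    /* Example (illustrative): for OpEntryPoint Fragment %4 "main" %in %out,
     * word 1 is the execution model, word 2 the entrypoint id, and words 3..
     * hold the packed name ("main" plus NUL padding); the interface ids start
     * in the word after the first one whose top byte is zero. */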
1402    uint32_t word = 3;
1403    while (entrypoint.word(word) & 0xff000000u) {
1404        ++word;
1405    }
1406    ++word;
1407
1408    for (; word < entrypoint.len(); word++) {
1409        auto insn = src->get_def(entrypoint.word(word));
1410        assert(insn != src->end());
1411        assert(insn.opcode() == spv::OpVariable);
1412
1413        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1414            unsigned id = insn.word(2);
1415            unsigned type = insn.word(1);
1416
1417            int location = value_or_default(var_locations, id, -1);
1418            int builtin = value_or_default(var_builtins, id, -1);
1419            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK; treated as 0 */
1420            bool is_patch = var_patch.find(id) != var_patch.end();
1421
1422            /* All variables and interface block members in the Input or Output storage classes
1423             * must be decorated with either a builtin or an explicit location.
1424             *
1425             * TODO: integrate the interface block support here. For now, don't complain --
1426             * a valid SPIRV module will only hit this path for the interface block case, as the
1427             * individual members of the type are decorated, rather than variable declarations.
1428             */
1429
1430            if (location != -1) {
1431                /* A user-defined interface variable, with a location. Where a variable
1432                 * occupied multiple locations, emit one result for each. */
1433                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1434                for (unsigned int offset = 0; offset < num_locations; offset++) {
1435                    interface_var v;
1436                    v.id = id;
1437                    v.type_id = type;
1438                    v.offset = offset;
1439                    v.is_patch = is_patch;
1440                    out[std::make_pair(location + offset, component)] = v;
1441                }
1442            } else if (builtin == -1) {
1443                /* An interface block instance */
1444                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type, is_patch);
1445            }
1446        }
1447    }
1448}
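/* Examples (illustrative): a variable decorated "Location 0" yields one
 * interface_var keyed at (0,0); a variable decorated only "BuiltIn Position"
 * has location == -1 and builtin != -1, so it is skipped; a block instance
 * with neither decoration on the variable itself falls through to
 * collect_interface_block_members() above. */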
1449
1450static void collect_interface_by_descriptor_slot(layer_data *my_data, shader_module const *src,
1451                                                 std::unordered_set<uint32_t> const &accessible_ids,
1452                                                 std::map<descriptor_slot_t, interface_var> &out) {
1453
1454    std::unordered_map<unsigned, unsigned> var_sets;
1455    std::unordered_map<unsigned, unsigned> var_bindings;
1456
1457    for (auto insn : *src) {
1458        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1459         * DecorationDescriptorSet and DecorationBinding.
1460         */
1461        if (insn.opcode() == spv::OpDecorate) {
1462            if (insn.word(2) == spv::DecorationDescriptorSet) {
1463                var_sets[insn.word(1)] = insn.word(3);
1464            }
1465
1466            if (insn.word(2) == spv::DecorationBinding) {
1467                var_bindings[insn.word(1)] = insn.word(3);
1468            }
1469        }
1470    }
1471
1472    for (auto id : accessible_ids) {
1473        auto insn = src->get_def(id);
1474        assert(insn != src->end());
1475
1476        if (insn.opcode() == spv::OpVariable &&
1477            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1478            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1479            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1480
1481            auto existing_it = out.find(std::make_pair(set, binding));
1482            if (existing_it != out.end()) {
1483                /* conflict within this SPIR-V module */
1484                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1485                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1486                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1487                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1488                        existing_it->first.second);
1489            }
1490
1491            interface_var v;
1492            v.id = insn.word(2);
1493            v.type_id = insn.word(1);
1494            v.offset = 0;
1495            v.is_patch = false;
1496            out[std::make_pair(set, binding)] = v;
1497        }
1498    }
1499}
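/* Example (illustrative):
 *   layout(set = 1, binding = 3) uniform sampler2D tex;
 * carries OpDecorate DescriptorSet 1 and Binding 3, so tex lands in `out`
 * under key (1,3). A missing decoration defaults to 0 via value_or_default(),
 * which is why two undecorated variables collide at (0,0) and trigger the
 * inconsistent-SPIRV message above. */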
1500
1501static bool validate_interface_between_stages(layer_data *my_data, shader_module const *producer,
1502                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1503                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1504                                              shader_stage_attributes const *consumer_stage) {
1505    std::map<location_t, interface_var> outputs;
1506    std::map<location_t, interface_var> inputs;
1507
1508    bool pass = true;
1509
1510    collect_interface_by_location(my_data, producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1511    collect_interface_by_location(my_data, consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1512
1513    auto a_it = outputs.begin();
1514    auto b_it = inputs.begin();
1515
1516    /* maps sorted by key (location); walk them together to find mismatches */
1517    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1518        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1519        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1520        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1521        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1522
1523        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1524            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1525                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1526                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1527                        a_first.second, consumer_stage->name)) {
1528                pass = false;
1529            }
1530            a_it++;
1531        } else if (a_at_end || a_first > b_first) {
1532            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1533                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1534                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1535                        producer_stage->name)) {
1536                pass = false;
1537            }
1538            b_it++;
1539        } else {
1540            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1541                             producer_stage->arrayed_output && !a_it->second.is_patch,
1542                             consumer_stage->arrayed_input && !b_it->second.is_patch,
1543                             true)) {
1544                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1545                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1546                            a_first.first, a_first.second,
1547                            describe_type(producer, a_it->second.type_id).c_str(),
1548                            describe_type(consumer, b_it->second.type_id).c_str())) {
1549                    pass = false;
1550                }
1551            }
1552            if (a_it->second.is_patch != b_it->second.is_patch) {
1553                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1554                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1555                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1556                            "per-%s in %s stage", a_first.first, a_first.second,
1557                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1558                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1559                    pass = false;
1560                }
1561            }
1562            a_it++;
1563            b_it++;
1564        }
1565    }
1566
1567    return pass;
1568}
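/* Worked example (illustrative): producer outputs at locations {0, 1} and
 * consumer inputs at {1, 2} walk as follows -- location 0 is reported as
 * written-but-not-consumed (perf warning), location 1 is type-checked, and
 * location 2 is reported as consumed-but-never-written (error). */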
1569
1570enum FORMAT_TYPE {
1571    FORMAT_TYPE_UNDEFINED,
1572    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1573    FORMAT_TYPE_SINT,
1574    FORMAT_TYPE_UINT,
1575};
1576
1577static unsigned get_format_type(VkFormat fmt) {
1578    switch (fmt) {
1579    case VK_FORMAT_UNDEFINED:
1580        return FORMAT_TYPE_UNDEFINED;
1581    case VK_FORMAT_R8_SINT:
1582    case VK_FORMAT_R8G8_SINT:
1583    case VK_FORMAT_R8G8B8_SINT:
1584    case VK_FORMAT_R8G8B8A8_SINT:
1585    case VK_FORMAT_R16_SINT:
1586    case VK_FORMAT_R16G16_SINT:
1587    case VK_FORMAT_R16G16B16_SINT:
1588    case VK_FORMAT_R16G16B16A16_SINT:
1589    case VK_FORMAT_R32_SINT:
1590    case VK_FORMAT_R32G32_SINT:
1591    case VK_FORMAT_R32G32B32_SINT:
1592    case VK_FORMAT_R32G32B32A32_SINT:
1593    case VK_FORMAT_B8G8R8_SINT:
1594    case VK_FORMAT_B8G8R8A8_SINT:
1595    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1596    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1597        return FORMAT_TYPE_SINT;
1598    case VK_FORMAT_R8_UINT:
1599    case VK_FORMAT_R8G8_UINT:
1600    case VK_FORMAT_R8G8B8_UINT:
1601    case VK_FORMAT_R8G8B8A8_UINT:
1602    case VK_FORMAT_R16_UINT:
1603    case VK_FORMAT_R16G16_UINT:
1604    case VK_FORMAT_R16G16B16_UINT:
1605    case VK_FORMAT_R16G16B16A16_UINT:
1606    case VK_FORMAT_R32_UINT:
1607    case VK_FORMAT_R32G32_UINT:
1608    case VK_FORMAT_R32G32B32_UINT:
1609    case VK_FORMAT_R32G32B32A32_UINT:
1610    case VK_FORMAT_B8G8R8_UINT:
1611    case VK_FORMAT_B8G8R8A8_UINT:
1612    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1613    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1614        return FORMAT_TYPE_UINT;
1615    default:
1616        return FORMAT_TYPE_FLOAT;
1617    }
1618}
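/* E.g. (illustrative): VK_FORMAT_R8G8B8A8_UNORM and VK_FORMAT_R32_SFLOAT both
 * take the default case and classify as FORMAT_TYPE_FLOAT, since UNORM data
 * appears float-typed from the shader's point of view. */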
1619
1620/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1621 * for comparison to a VkFormat's characterization above. */
1622static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1623    auto insn = src->get_def(type);
1624    assert(insn != src->end());
1625
1626    switch (insn.opcode()) {
1627    case spv::OpTypeInt:
1628        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1629    case spv::OpTypeFloat:
1630        return FORMAT_TYPE_FLOAT;
1631    case spv::OpTypeVector:
1632        return get_fundamental_type(src, insn.word(2));
1633    case spv::OpTypeMatrix:
1634        return get_fundamental_type(src, insn.word(2));
1635    case spv::OpTypeArray:
1636        return get_fundamental_type(src, insn.word(2));
1637    case spv::OpTypePointer:
1638        return get_fundamental_type(src, insn.word(3));
1639    default:
1640        return FORMAT_TYPE_UNDEFINED;
1641    }
1642}
1643
1644static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1645    uint32_t bit_pos = u_ffs(stage);
1646    return bit_pos - 1;
1647}
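/* E.g. (illustrative): VK_SHADER_STAGE_FRAGMENT_BIT is 0x10, so assuming a
 * 1-based find-first-set u_ffs() returns 5 and the stage id is 4 -- a dense
 * index matching the shader_stage_attribs[] ordering above. */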
1648
1649static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1650    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1651     * each binding should be specified only once.
1652     */
1653    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1654    bool pass = true;
1655
1656    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1657        auto desc = &vi->pVertexBindingDescriptions[i];
1658        auto &binding = bindings[desc->binding];
1659        if (binding) {
1660            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1661                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1662                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1663                pass = false;
1664            }
1665        } else {
1666            binding = desc;
1667        }
1668    }
1669
1670    return pass;
1671}
1672
1673static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi,
1674                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1675    std::map<location_t, interface_var> inputs;
1676    bool pass = true;
1677
1678    collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false);
1679
1680    /* Build index by location */
1681    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1682    if (vi) {
1683        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
1684            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
1685    }
1686
1687    auto it_a = attribs.begin();
1688    auto it_b = inputs.begin();
1689
1690    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1691        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1692        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1693        auto a_first = a_at_end ? 0 : it_a->first;
1694        auto b_first = b_at_end ? 0 : it_b->first.first;
1695        if (!a_at_end && (b_at_end || a_first < b_first)) {
1696            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1697                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1698                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1699                pass = false;
1700            }
1701            it_a++;
1702        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1703            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1704                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
1705                        b_first)) {
1706                pass = false;
1707            }
1708            it_b++;
1709        } else {
1710            unsigned attrib_type = get_format_type(it_a->second->format);
1711            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1712
1713            /* type checking */
1714            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1715                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1716                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1717                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1718                            string_VkFormat(it_a->second->format), a_first,
1719                            describe_type(vs, it_b->second.type_id).c_str())) {
1720                    pass = false;
1721                }
1722            }
1723
1724            /* OK! */
1725            it_a++;
1726            it_b++;
1727        }
1728    }
1729
1730    return pass;
1731}
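/* Example mismatch (illustrative): a VK_FORMAT_R32G32B32A32_SFLOAT attribute
 * at location 0 fed into
 *   layout(location = 0) in ivec4 idx;
 * classifies as FORMAT_TYPE_FLOAT vs FORMAT_TYPE_SINT and triggers the
 * interface-type-mismatch error above. */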
1732
1733static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs,
1734                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1735    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
1736    std::map<location_t, interface_var> outputs;
1737    bool pass = true;
1738
1739    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1740
1741    collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false);
1742
1743    auto it = outputs.begin();
1744    uint32_t attachment = 0;
1745
1746    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
1747     * are currently dense, but the parallel with matching between shader stages is nice.
1748     */
1749
1750    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
1751        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
1752            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1753                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1754                        "FS writes to output location %d with no matching attachment", it->first.first)) {
1755                pass = false;
1756            }
1757            it++;
1758        } else if (it == outputs.end() || it->first.first > attachment) {
1759            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1760                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
1761                pass = false;
1762            }
1763            attachment++;
1764        } else {
1765            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
1766            unsigned att_type = get_format_type(color_formats[attachment]);
1767
1768            /* type checking */
1769            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1770                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1771                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1772                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
1773                            string_VkFormat(color_formats[attachment]),
1774                            describe_type(fs, it->second.type_id).c_str())) {
1775                    pass = false;
1776                }
1777            }
1778
1779            /* OK! */
1780            it++;
1781            attachment++;
1782        }
1783    }
1784
1785    return pass;
1786}
1787
1788/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1789 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1790 * for example.
1791 * Note: we only explore the parts of the module which might actually contain ids we care about for the above analyses.
1792 *  - NOT the shader input/output interfaces.
1793 *
1794 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1795 * converting parts of this to be generated from the machine-readable spec instead.
1796 */
1797static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1798    std::unordered_set<uint32_t> worklist;
1799    worklist.insert(entrypoint.word(2));
1800
1801    while (!worklist.empty()) {
1802        auto id_iter = worklist.begin();
1803        auto id = *id_iter;
1804        worklist.erase(id_iter);
1805
1806        auto insn = src->get_def(id);
1807        if (insn == src->end()) {
1808            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1809             * across all kinds of things here that we may not care about. */
1810            continue;
1811        }
1812
1813        /* try to add to the output set */
1814        if (!ids.insert(id).second) {
1815            continue; /* if we already saw this id, we don't want to walk it again. */
1816        }
1817
1818        switch (insn.opcode()) {
1819        case spv::OpFunction:
1820            /* scan whole body of the function, enlisting anything interesting */
1821            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1822                switch (insn.opcode()) {
1823                case spv::OpLoad:
1824                case spv::OpAtomicLoad:
1825                case spv::OpAtomicExchange:
1826                case spv::OpAtomicCompareExchange:
1827                case spv::OpAtomicCompareExchangeWeak:
1828                case spv::OpAtomicIIncrement:
1829                case spv::OpAtomicIDecrement:
1830                case spv::OpAtomicIAdd:
1831                case spv::OpAtomicISub:
1832                case spv::OpAtomicSMin:
1833                case spv::OpAtomicUMin:
1834                case spv::OpAtomicSMax:
1835                case spv::OpAtomicUMax:
1836                case spv::OpAtomicAnd:
1837                case spv::OpAtomicOr:
1838                case spv::OpAtomicXor:
1839                    worklist.insert(insn.word(3)); /* ptr */
1840                    break;
1841                case spv::OpStore:
1842                case spv::OpAtomicStore:
1843                    worklist.insert(insn.word(1)); /* ptr */
1844                    break;
1845                case spv::OpAccessChain:
1846                case spv::OpInBoundsAccessChain:
1847                    worklist.insert(insn.word(3)); /* base ptr */
1848                    break;
1849                case spv::OpSampledImage:
1850                case spv::OpImageSampleImplicitLod:
1851                case spv::OpImageSampleExplicitLod:
1852                case spv::OpImageSampleDrefImplicitLod:
1853                case spv::OpImageSampleDrefExplicitLod:
1854                case spv::OpImageSampleProjImplicitLod:
1855                case spv::OpImageSampleProjExplicitLod:
1856                case spv::OpImageSampleProjDrefImplicitLod:
1857                case spv::OpImageSampleProjDrefExplicitLod:
1858                case spv::OpImageFetch:
1859                case spv::OpImageGather:
1860                case spv::OpImageDrefGather:
1861                case spv::OpImageRead:
1862                case spv::OpImage:
1863                case spv::OpImageQueryFormat:
1864                case spv::OpImageQueryOrder:
1865                case spv::OpImageQuerySizeLod:
1866                case spv::OpImageQuerySize:
1867                case spv::OpImageQueryLod:
1868                case spv::OpImageQueryLevels:
1869                case spv::OpImageQuerySamples:
1870                case spv::OpImageSparseSampleImplicitLod:
1871                case spv::OpImageSparseSampleExplicitLod:
1872                case spv::OpImageSparseSampleDrefImplicitLod:
1873                case spv::OpImageSparseSampleDrefExplicitLod:
1874                case spv::OpImageSparseSampleProjImplicitLod:
1875                case spv::OpImageSparseSampleProjExplicitLod:
1876                case spv::OpImageSparseSampleProjDrefImplicitLod:
1877                case spv::OpImageSparseSampleProjDrefExplicitLod:
1878                case spv::OpImageSparseFetch:
1879                case spv::OpImageSparseGather:
1880                case spv::OpImageSparseDrefGather:
1881                case spv::OpImageTexelPointer:
1882                    worklist.insert(insn.word(3)); /* image or sampled image */
1883                    break;
1884                case spv::OpImageWrite:
1885                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1886                    break;
1887                case spv::OpFunctionCall:
1888                    for (uint32_t i = 3; i < insn.len(); i++) {
1889                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1890                    }
1891                    break;
1892
1893                case spv::OpExtInst:
1894                    for (uint32_t i = 5; i < insn.len(); i++) {
1895                        worklist.insert(insn.word(i)); /* operands to ext inst */
1896                    }
1897                    break;
1898                }
1899            }
1900            break;
1901        }
1902    }
1903}
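/* Illustrative trace: starting from the entrypoint's function id, the
 * worklist transitively pulls in every pointer touched by loads, stores and
 * access chains, every callee and its arguments, and extended-instruction
 * operands. A uniform declared in the module but never reached this way
 * never enters `ids`, so later per-stage checks treat it as unused. */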
1904
1905static bool validate_push_constant_block_against_pipeline(layer_data *my_data,
1906                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1907                                                          shader_module const *src, spirv_inst_iter type,
1908                                                          VkShaderStageFlagBits stage) {
1909    bool pass = true;
1910
1911    /* strip off ptrs etc */
1912    type = get_struct_type(src, type, false);
1913    assert(type != src->end());
1914
1915    /* validate directly off the offsets. this isn't quite correct for arrays
1916     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1917     * sizes */
1918    for (auto insn : *src) {
1919        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1920
1921            if (insn.word(3) == spv::DecorationOffset) {
1922                unsigned offset = insn.word(4);
1923                auto size = 4; /* bytes; TODO: calculate this based on the type */
1924
1925                bool found_range = false;
1926                for (auto const &range : *pushConstantRanges) {
1927                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1928                        found_range = true;
1929
1930                        if ((range.stageFlags & stage) == 0) {
1931                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1932                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1933                                        "Push constant range covering variable starting at "
1934                                        "offset %u not accessible from stage %s",
1935                                        offset, string_VkShaderStageFlagBits(stage))) {
1936                                pass = false;
1937                            }
1938                        }
1939
1940                        break;
1941                    }
1942                }
1943
1944                if (!found_range) {
1945                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1946                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1947                                "Push constant range covering variable starting at "
1948                                "offset %u not declared in layout",
1949                                offset)) {
1950                        pass = false;
1951                    }
1952                }
1953            }
1954        }
1955    }
1956
1957    return pass;
1958}
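/* Example (illustrative): with the single range
 *   { stageFlags = VK_SHADER_STAGE_VERTEX_BIT, offset = 0, size = 8 }
 * a member at Offset 8 fails containment (0 + 8 < 8 + 4) and reports
 * PUSH_CONSTANT_OUT_OF_RANGE, while a member at Offset 4 referenced from a
 * fragment shader is contained but reports NOT_ACCESSIBLE_FROM_STAGE. */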
1959
1960static bool validate_push_constant_usage(layer_data *my_data,
1961                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
1962                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
1963    bool pass = true;
1964
1965    for (auto id : accessible_ids) {
1966        auto def_insn = src->get_def(id);
1967        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1968            pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src,
1969                                                                 src->get_def(def_insn.word(1)), stage);
1970        }
1971    }
1972
1973    return pass;
1974}
1975
1976// For given pipelineLayout verify that the setLayout at slot.first
1977//  has the requested binding at slot.second
1978static VkDescriptorSetLayoutBinding const * get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) {
1979
1980    if (!pipelineLayout)
1981        return nullptr;
1982
1983    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
1984        return nullptr;
1985
1986    auto const layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]];
1987
1988    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
1989    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
1990        return nullptr;
1991
1992    assert(bindingIt->second < layout_node->createInfo.bindingCount);
1993    return &layout_node->createInfo.pBindings[bindingIt->second];
1994}
1995
1996// Block of code at start here for managing/tracking Pipeline state that this layer cares about
1997
1998static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
1999
2000// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2001//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2002 //   to that same cmd buffer by a separate thread are not changing state from underneath us
2003// Track the last cmd buffer touched by this thread
2004
2005static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2006    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2007        if (pCB->drawCount[i])
2008            return true;
2009    }
2010    return false;
2011}
2012
2013// Check object status for selected flag state
2014static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2015                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2016    if (!(pNode->status & status_mask)) {
2017        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2018                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2019                       "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2020    }
2021    return false;
2022}
2023
2024// Retrieve pipeline node ptr for given pipeline object
2025static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
2026    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
2027        return NULL;
2028    }
2029    return my_data->pipelineMap[pipeline];
2030}
2031
2032// Return true if for a given PSO, the given state enum is dynamic, else return false
2033static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2034    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2035        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2036            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2037                return true;
2038        }
2039    }
2040    return false;
2041}
2042
2043// Validate state stored as flags at time of draw call
2044static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2045    bool result;
2046    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2047                             "Dynamic viewport state not set for this command buffer");
2048    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2049                              "Dynamic scissor state not set for this command buffer");
2050    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2051        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2052         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2053        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2054                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2055    }
2056    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2057        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2058        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2059                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2060    }
2061    if (pPipe->blendConstantsEnabled) {
2062        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2063                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2064    }
2065    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2066        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2067        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2068                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2069    }
2070    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2071        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2072        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2073                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2074        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2075                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2076        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2077                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2078    }
2079    if (indexedDraw) {
2080        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2081                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2082                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2083    }
2084    return result;
2085}
2086
2087// Verify attachment reference compatibility according to spec
2088 //  If one array is larger, treat the missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
2089 //  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2090 //   to make sure that format and sample counts match.
2091//  If not, they are not compatible.
2092static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2093                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2094                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2095                                             const VkAttachmentDescription *pSecondaryAttachments) {
2096    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2097        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2098            return true;
2099    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2100        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2101            return true;
2102    } else { // format and sample count must match
2103        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2104             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2105            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2106             pSecondaryAttachments[pSecondary[index].attachment].samples))
2107            return true;
2108    }
2109    // Format and sample counts didn't match
2110    return false;
2111}
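// Example (illustrative): if the primary subpass has 2 color attachments and the secondary
// has 1, index 1 compares the primary reference against an implicit VK_ATTACHMENT_UNUSED --
// compatible only if the primary reference at index 1 is itself VK_ATTACHMENT_UNUSED.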
2112
2113 // For given primary and secondary RenderPass objects, verify that they're compatible
2114static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2115                                            string &errorMsg) {
2116    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2117        stringstream errorStr;
2118        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2119        errorMsg = errorStr.str();
2120        return false;
2121    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2122        stringstream errorStr;
2123        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2124        errorMsg = errorStr.str();
2125        return false;
2126    }
2127    // Trivial pass case is exact same RP
2128    if (primaryRP == secondaryRP) {
2129        return true;
2130    }
2131    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2132    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2133    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2134        stringstream errorStr;
2135        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2136                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2137        errorMsg = errorStr.str();
2138        return false;
2139    }
2140    uint32_t spIndex = 0;
2141    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2142        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2143        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2144        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2145        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2146        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2147            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2148                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2149                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2150                stringstream errorStr;
2151                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2152                errorMsg = errorStr.str();
2153                return false;
2154            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2155                                                         primaryColorCount, primaryRPCI->pAttachments,
2156                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2157                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2158                stringstream errorStr;
2159                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2160                errorMsg = errorStr.str();
2161                return false;
2162            }
2163        }
2164
2165        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2166                                              1, primaryRPCI->pAttachments,
2167                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2168                                              1, secondaryRPCI->pAttachments)) {
2169            stringstream errorStr;
2170            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2171            errorMsg = errorStr.str();
2172            return false;
2173        }
2174
2175        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2176        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2177        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2178        for (uint32_t i = 0; i < inputMax; ++i) {
2179            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2180                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2181                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2182                stringstream errorStr;
2183                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2184                errorMsg = errorStr.str();
2185                return false;
2186            }
2187        }
2188    }
2189    return true;
2190}
2191
2192 // For a given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
2193static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
2194                                            const uint32_t layoutIndex, string &errorMsg) {
2195    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
2196    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
2197        stringstream errorStr;
2198        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2199        errorMsg = errorStr.str();
2200        return false;
2201    }
2202    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
2203        stringstream errorStr;
2204        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
2205                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
2206                 << ", but you're attempting to bind set to index " << layoutIndex;
2207        errorMsg = errorStr.str();
2208        return false;
2209    }
2210    // Get the specific setLayout from PipelineLayout that overlaps this set
2211    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
2212    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
2213        return true;
2214    }
2215    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
2216    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
2217        stringstream errorStr;
2218        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
2219                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
2220                 << " descriptors.";
2221        errorMsg = errorStr.str();
2222        return false; // trivial fail case
2223    }
2224    // Now need to check set against corresponding pipelineLayout to verify compatibility
2225    for (size_t i = 0; i < descriptorCount; ++i) {
2226        // Need to verify that layouts are identically defined
2227        //  TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
2228        //    do we also need to check immutable samplers?
2229        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
2230            stringstream errorStr;
2231            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
2232                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
2233                     << "' but corresponding descriptor from pipelineLayout is type '"
2234                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
2235            errorMsg = errorStr.str();
2236            return false;
2237        }
2238        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
2239            stringstream errorStr;
2240            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
2241                     << " but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
2242            errorMsg = errorStr.str();
2243            return false;
2244        }
2245    }
2246    return true;
2247}
2248
2249// Validate that data for each specialization entry is fully contained within the buffer.
2250static bool validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
2251    bool pass = true;
2252
2253    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2254
2255    if (spec) {
2256        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2257            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2258                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2259                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2260                            "Specialization entry %u (for constant id %u) references memory outside provided "
2261                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2262                            " bytes provided)",
2263                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2264                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2265
2266                    pass = false;
2267                }
2268            }
2269        }
2270    }
2271
2272    return pass;
2273}
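/* Example (illustrative): with dataSize = 8, a map entry
 * { constantID = 0, offset = 4, size = 4 } is fully contained (4 + 4 <= 8)
 * and passes, while { offset = 6, size = 4 } spills past the buffer and is
 * reported as SHADER_CHECKER_BAD_SPECIALIZATION. */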
2274
2275static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
2276                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2277    auto type = module->get_def(type_id);
2278
2279    descriptor_count = 1;
2280
2281    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2282     * descriptor count for each dimension. */
2283    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2284        if (type.opcode() == spv::OpTypeArray) {
2285            descriptor_count *= get_constant_value(module, type.word(3));
2286            type = module->get_def(type.word(2));
2287        }
2288        else {
2289            type = module->get_def(type.word(3));
2290        }
2291    }
2292
2293    switch (type.opcode()) {
2294    case spv::OpTypeStruct: {
2295        for (auto insn : *module) {
2296            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2297                if (insn.word(2) == spv::DecorationBlock) {
2298                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2299                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2300                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2301                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2302                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2303                }
2304            }
2305        }
2306
2307        /* Invalid */
2308        return false;
2309    }
2310
2311    case spv::OpTypeSampler:
2312        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2313
2314    case spv::OpTypeSampledImage:
2315        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2316            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2317             * doesn't really have a sampler, and a texel buffer descriptor
2318             * doesn't really provide one. Allow this slight mismatch.
2319             */
2320            auto image_type = module->get_def(type.word(2));
2321            auto dim = image_type.word(3);
2322            auto sampled = image_type.word(7);
2323            return dim == spv::DimBuffer && sampled == 1;
2324        }
2325        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2326
2327    case spv::OpTypeImage: {
2328        /* Many descriptor types can back an image type -- which one depends on the
2329         * dimension and on whether the image will be used with a sampler. SPIR-V for
2330         * Vulkan requires that Sampled be 1 or 2 -- leaving the decision to
2331         * runtime (Sampled == 0) is not allowed.
2332         */
2333        auto dim = type.word(3);
2334        auto sampled = type.word(7);
2335
2336        if (dim == spv::DimSubpassData) {
2337            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2338        } else if (dim == spv::DimBuffer) {
2339            if (sampled == 1) {
2340                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2341            } else {
2342                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2343            }
2344        } else if (sampled == 1) {
2345            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2346        } else {
2347            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2348        }
2349    }
2350
2351    /* We shouldn't really see any other junk types -- but if we do, they're
2352     * a mismatch.
2353     */
2354    default:
2355        return false; /* Mismatch */
2356    }
2357}
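// Worked example (illustrative only): GLSL `layout(set=0, binding=0) uniform
// sampler2D tex[4];` shows up in SPIR-V roughly as
//
//     OpTypePointer UniformConstant -> OpTypeArray (length 4) -> OpTypeSampledImage
//
// descriptor_type_match() strips the pointer, multiplies descriptor_count by 4
// for the array level, and requires VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
// the caller then verifies the binding provides descriptorCount >= 4.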
2358
2359static bool require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
2360    if (!feature) {
2361        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2362                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2363                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
2364                    "enabled on the device",
2365                    feature_name)) {
2366            return false;
2367        }
2368    }
2369
2370    return true;
2371}
2372
2373static bool validate_shader_capabilities(layer_data *my_data, shader_module const *src) {
2374    bool pass = true;
2375
2376    auto enabledFeatures = &my_data->phys_dev_properties.features;
2377
2378    for (auto insn : *src) {
2379        if (insn.opcode() == spv::OpCapability) {
2380            switch (insn.word(1)) {
2381            case spv::CapabilityMatrix:
2382            case spv::CapabilityShader:
2383            case spv::CapabilityInputAttachment:
2384            case spv::CapabilitySampled1D:
2385            case spv::CapabilityImage1D:
2386            case spv::CapabilitySampledBuffer:
2387            case spv::CapabilityImageBuffer:
2388            case spv::CapabilityImageQuery:
2389            case spv::CapabilityDerivativeControl:
2390                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2391                break;
2392
2393            case spv::CapabilityGeometry:
2394                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
2395                break;
2396
2397            case spv::CapabilityTessellation:
2398                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
2399                break;
2400
2401            case spv::CapabilityFloat64:
2402                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2403                break;
2404
2405            case spv::CapabilityInt64:
2406                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
2407                break;
2408
2409            case spv::CapabilityTessellationPointSize:
2410            case spv::CapabilityGeometryPointSize:
2411                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2412                                        "shaderTessellationAndGeometryPointSize");
2413                break;
2414
2415            case spv::CapabilityImageGatherExtended:
2416                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2417                break;
2418
2419            case spv::CapabilityStorageImageMultisample:
2420                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2421                break;
2422
2423            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2424                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2425                                        "shaderUniformBufferArrayDynamicIndexing");
2426                break;
2427
2428            case spv::CapabilitySampledImageArrayDynamicIndexing:
2429                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2430                                        "shaderSampledImageArrayDynamicIndexing");
2431                break;
2432
2433            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2434                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2435                                        "shaderStorageBufferArrayDynamicIndexing");
2436                break;
2437
2438            case spv::CapabilityStorageImageArrayDynamicIndexing:
2439                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2440                                        "shaderStorageImageArrayDynamicIndexing");
2441                break;
2442
2443            case spv::CapabilityClipDistance:
2444                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2445                break;
2446
2447            case spv::CapabilityCullDistance:
2448                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2449                break;
2450
2451            case spv::CapabilityImageCubeArray:
2452                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2453                break;
2454
2455            case spv::CapabilitySampleRateShading:
2456                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2457                break;
2458
2459            case spv::CapabilitySparseResidency:
2460                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2461                break;
2462
2463            case spv::CapabilityMinLod:
2464                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2465                break;
2466
2467            case spv::CapabilitySampledCubeArray:
2468                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2469                break;
2470
2471            case spv::CapabilityImageMSArray:
2472                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2473                break;
2474
2475            case spv::CapabilityStorageImageExtendedFormats:
2476                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
2477                                        "shaderStorageImageExtendedFormats");
2478                break;
2479
2480            case spv::CapabilityInterpolationFunction:
2481                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2482                break;
2483
2484            case spv::CapabilityStorageImageReadWithoutFormat:
2485                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2486                                        "shaderStorageImageReadWithoutFormat");
2487                break;
2488
2489            case spv::CapabilityStorageImageWriteWithoutFormat:
2490                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2491                                        "shaderStorageImageWriteWithoutFormat");
2492                break;
2493
2494            case spv::CapabilityMultiViewport:
2495                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
2496                break;
2497
2498            default:
2499                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2500                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2501                            "Shader declares capability %u, which is not supported in Vulkan.",
2502                            insn.word(1)))
2503                    pass = false;
2504                break;
2505            }
2506        }
2507    }
2508
2509    return pass;
2510}
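// For example (sketch, assuming a shader that uses doubles): such a module
// declares `OpCapability Float64`, so the application must have requested the
// matching feature when the device was created, e.g.:
//
//     VkPhysicalDeviceFeatures features = {};
//     features.shaderFloat64 = VK_TRUE;
//     // ... then pass &features as VkDeviceCreateInfo::pEnabledFeatures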
2511
2512static bool validate_pipeline_shader_stage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage,
2513                                           PIPELINE_NODE *pipeline, PIPELINE_LAYOUT_NODE *pipelineLayout,
2514                                           shader_module **out_module, spirv_inst_iter *out_entrypoint) {
2515    bool pass = true;
2516    auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get();
2517    pass &= validate_specialization_offsets(dev_data, pStage);
2518
2519    /* find the entrypoint */
2520    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2521    if (entrypoint == module->end()) {
2522        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2523                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2524                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2525                    string_VkShaderStageFlagBits(pStage->stage))) {
2526            pass = false;
2527        }
2528    }
2529
2530    /* validate shader capabilities against enabled device features */
2531    pass &= validate_shader_capabilities(dev_data, module);
2532
2533    /* mark accessible ids */
2534    std::unordered_set<uint32_t> accessible_ids;
2535    mark_accessible_ids(module, entrypoint, accessible_ids);
2536
2537    /* validate descriptor set layout against what the entrypoint actually uses */
2538    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2539    collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses);
2540
2541    /* validate push constant usage */
2542    pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges,
2543                                        module, accessible_ids, pStage->stage);
2544
2545    /* validate descriptor use */
2546    for (auto use : descriptor_uses) {
2547        // While validating shaders, capture which slots are used by the pipeline
2548        pipeline->active_slots[use.first.first].insert(use.first.second);
2549
2550        /* find the matching binding */
2551        auto binding = get_descriptor_binding(dev_data, pipelineLayout, use.first);
2552        unsigned required_descriptor_count;
2553
2554        if (!binding) {
2555            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2556                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2557                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2558                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2559                pass = false;
2560            }
2561        } else if (~binding->stageFlags & pStage->stage) {
2562            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2563                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2564                        "Shader uses descriptor slot %u.%u (used "
2565                        "as type `%s`) but descriptor not "
2566                        "accessible from stage %s",
2567                        use.first.first, use.first.second,
2568                        describe_type(module, use.second.type_id).c_str(),
2569                        string_VkShaderStageFlagBits(pStage->stage))) {
2570                pass = false;
2571            }
2572        } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) {
2573            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2574                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2575                        "Type mismatch on descriptor slot "
2576                        "%u.%u (used as type `%s`) but "
2577                        "bound descriptor is of type %s",
2578                        use.first.first, use.first.second,
2579                        describe_type(module, use.second.type_id).c_str(),
2580                        string_VkDescriptorType(binding->descriptorType))) {
2581                pass = false;
2582            }
2583        } else if (binding->descriptorCount < required_descriptor_count) {
2584            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2585                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2586                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2587                        required_descriptor_count, use.first.first, use.first.second,
2588                        describe_type(module, use.second.type_id).c_str(),
2589                        binding->descriptorCount)) {
2590                pass = false;
2591            }
2592        }
2593    }
2594
2595    return pass;
2596}
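// Illustrative mismatch that the loop above reports (hypothetical shader and
// layout): a shader declaring `layout(set=0, binding=1) uniform sampler2D s;`
// used with a pipeline layout whose set 0, binding 1 is
// VK_DESCRIPTOR_TYPE_STORAGE_BUFFER triggers SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH,
// since descriptor_type_match() expects VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER there.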
2597
2598
2599// Validate the shaders used by the given pipeline, and store the descriptor slots
2600//  that those shaders actually use into pPipeline->active_slots
2601static bool validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2602    auto pCreateInfo = reinterpret_cast<VkGraphicsPipelineCreateInfo const *>(&pPipeline->graphicsPipelineCI);
2603    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2604    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2605
2606    shader_module *shaders[5];
2607    memset(shaders, 0, sizeof(shaders));
2608    spirv_inst_iter entrypoints[5];
2609    memset(entrypoints, 0, sizeof(entrypoints));
2610    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2611    bool pass = true;
2612
2613    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2614
2615    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2616        VkPipelineShaderStageCreateInfo const *pStage =
2617            reinterpret_cast<VkPipelineShaderStageCreateInfo const *>(&pCreateInfo->pStages[i]);
2618        auto stage_id = get_shader_stage_id(pStage->stage);
2619        pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout,
2620                                               &shaders[stage_id], &entrypoints[stage_id]);
2621    }
2622
2623    vi = pCreateInfo->pVertexInputState;
2624
2625    if (vi) {
2626        pass &= validate_vi_consistency(my_data, vi);
2627    }
2628
2629    if (shaders[vertex_stage]) {
2630        pass &= validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2631    }
2632
2633    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2634    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2635
2636    while (!shaders[producer] && producer != fragment_stage) {
2637        producer++;
2638        consumer++;
2639    }
2640
2641    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2642        assert(shaders[producer]);
2643        if (shaders[consumer]) {
2644            pass &= validate_interface_between_stages(my_data,
2645                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2646                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2647
2648            producer = consumer;
2649        }
2650    }
2651
2652    auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr;
2653
2654    if (shaders[fragment_stage] && rp) {
2655        pass &= validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp,
2656                                                       pCreateInfo->subpass);
2657    }
2658
2659    return pass;
2660}
2661
2662static bool validate_compute_pipeline(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2663    auto pCreateInfo = reinterpret_cast<VkComputePipelineCreateInfo const *>(&pPipeline->computePipelineCI);
2664
2665    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2666
2667    shader_module *module;
2668    spirv_inst_iter entrypoint;
2669
2670    return validate_pipeline_shader_stage(my_data, &pCreateInfo->stage, pPipeline, pipelineLayout,
2671                                          &module, &entrypoint);
2672}
2673
2674// Return Set node ptr for specified set or else NULL
2675static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
2676    auto set_it = my_data->setMap.find(set); // single lookup instead of find + operator[]
2677    if (set_it == my_data->setMap.end())
2678        return NULL;
2679    return set_it->second;
2680}
2681
2682// For given Layout Node and binding, return index where that binding begins
2683static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2684    uint32_t offsetIndex = 0;
2685    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2686        if (pLayout->createInfo.pBindings[i].binding == binding)
2687            break;
2688        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2689    }
2690    return offsetIndex;
2691}
2692
2693// For given layout node and binding, return last index that is updated
2694static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2695    uint32_t offsetIndex = 0;
2696    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2697        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2698        if (pLayout->createInfo.pBindings[i].binding == binding)
2699            break;
2700    }
2701    return offsetIndex - 1;
2702}
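// Worked example (illustrative): for a layout created with
//
//     pBindings[0] = {binding 0, descriptorCount 2, ...}
//     pBindings[1] = {binding 1, descriptorCount 3, ...}
//
// the flattened update array has 5 slots, so getBindingStartIndex(layout, 1)
// returns 2 and getBindingEndIndex(layout, 1) returns 4.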
2703
2704// For the given command buffer, verify and update the state for activeSetBindingsPairs
2705//  This includes:
2706//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2707//     To be valid, the dynamic offset combined with the offset and range from its
2708//     descriptor update must not overflow the size of its buffer being updated
2709//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2710//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2711static bool validate_and_update_drawtime_descriptor_state(
2712    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2713    const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
2714    bool result = false;
2715
2716    VkWriteDescriptorSet *pWDS = NULL;
2717    uint32_t dynOffsetIndex = 0;
2718    VkDeviceSize bufferSize = 0;
2719    for (auto set_bindings_pair : activeSetBindingsPairs) {
2720        SET_NODE *set_node = set_bindings_pair.first;
2721        LAYOUT_NODE *layout_node = set_node->pLayout;
2722        for (auto binding : set_bindings_pair.second) {
2723            uint32_t startIdx = getBindingStartIndex(layout_node, binding);
2724            uint32_t endIdx = getBindingEndIndex(layout_node, binding);
2725            for (uint32_t i = startIdx; i <= endIdx; ++i) {
2726                // We did check earlier to verify that set was updated, but now make sure given slot was updated
2727                // TODO : Would be better to store set# that set is bound to so we can report set.binding[index] not updated
2728                if (!set_node->pDescriptorUpdates[i]) {
2729                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2730                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2731                                        DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2732                                        "DS %#" PRIxLEAST64 " bound and active but it never had binding %u updated. It is now being used to draw so "
2733                                                            "this will result in undefined behavior.",
2734                                        reinterpret_cast<const uint64_t &>(set_node->set), binding);
2735                } else {
2736                    switch (set_node->pDescriptorUpdates[i]->sType) {
2737                    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
2738                        pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];
2739
2740                        // Verify uniform and storage buffers actually are bound to valid memory at draw time.
2741                        if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
2742                            (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
2743                            (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
2744                            (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
2745                            for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2746                                auto buffer_node = dev_data->bufferMap.find(pWDS->pBufferInfo[j].buffer);
2747                                if (buffer_node == dev_data->bufferMap.end()) {
2748                                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2749                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2750                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2751                                                      DRAWSTATE_INVALID_BUFFER, "DS",
2752                                                      "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index #%u"
2753                                                      " is not defined! Has vkCreateBuffer been called?",
2754                                                      reinterpret_cast<const uint64_t &>(set_node->set),
2755                                                      string_VkDescriptorType(pWDS->descriptorType),
2756                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i);
2757                                } else {
2758                                    auto mem_entry = dev_data->memObjMap.find(buffer_node->second.mem);
2759                                    if (mem_entry == dev_data->memObjMap.end()) {
2760                                        result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2761                                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2762                                                          reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2763                                                          DRAWSTATE_INVALID_BUFFER, "DS",
2764                                                          "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index"
2765                                                          " #%u, has no memory bound to it!",
2766                                                          reinterpret_cast<const uint64_t &>(set_node->set),
2767                                                          string_VkDescriptorType(pWDS->descriptorType),
2768                                                          reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i);
2769                                    }
2770                                }
2771                                // If it's a dynamic buffer, make sure the offsets are within the buffer.
2772                                if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
2773                                    (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
2774                                    bufferSize = dev_data->bufferMap[pWDS->pBufferInfo[j].buffer].createInfo.size;
2775                                    uint32_t dynOffset =
2776                                        pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
2777                                    if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
2778                                        if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
2779                                            result |= log_msg(
2780                                                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2781                                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2782                                                reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2783                                                DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2784                                                "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has a range of "
2785                                                "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32 " "
2786                                                "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
2787                                                ") which has a size of %#" PRIxLEAST64 ".",
2788                                                reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
2789                                                pWDS->pBufferInfo[j].offset,
2790                                                reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2791                                        }
2792                                    } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) >
2793                                               bufferSize) {
2794                                        result |=
2795                                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2796                                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2797                                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2798                                                    DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2799                                                    "VkDescriptorSet (%#" PRIxLEAST64
2800                                                    ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". "
2801                                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
2802                                                    " from its update, this oversteps its buffer "
2803                                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
2804                                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
2805                                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
2806                                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2807                                    }
2808                                    dynOffsetIndex++;
2809                                }
2810                            }
2811                        }
2812                        if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
2813                            for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2814                                pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
2815                            }
2816                        } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
2817                            for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2818                                assert(dev_data->bufferViewMap.find(pWDS->pTexelBufferView[j]) != dev_data->bufferViewMap.end());
2819                                pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
2820                            }
2821                        } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2822                                   pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
2823                            for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2824                                pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
2825                            }
2826                        }
2827                        i += pWDS->descriptorCount; // Advance i to end of this set of descriptors (++i at end of for loop will move 1
2828                                                    // index past last of these descriptors)
2829                        break;
2830                    default: // Currently only shadowing Write update nodes so shouldn't get here
2831                        assert(0);
2832                        continue;
2833                    }
2834                }
2835            }
2836        }
2837    }
2838    return result;
2839}
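// Numeric example of the dynamic-offset check above (illustrative values):
// a VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC update with offset 128 and
// range 64 into a 256-byte buffer is fine at update time, but a dynamic offset
// of 128 supplied at vkCmdBindDescriptorSets makes the access end at
// 128 + 64 + 128 = 320 > 256, which triggers DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW.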
2840// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound.
2841//   When validate_and_update_draw_state() handles compute shaders so that active_slots is correct for compute pipelines, this
2842//   function can be killed and validate_and_update_draw_state() used instead
2843static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
2844    VkWriteDescriptorSet *pWDS = nullptr;
2845    SET_NODE *pSet = nullptr;
2846    // For the bound descriptor sets, pull off any storage images and buffers
2847    //  This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute
2848    //  pipelines
2849    for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) {
2850        // Get the set node
2851        pSet = getSetNode(dev_data, set);
2852        // For each update in the set
2853        for (auto pUpdate : pSet->pDescriptorUpdates) {
2854            // If it's a write update to STORAGE type capture image/buffer being updated
2855            if (pUpdate && (pUpdate->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)) {
2856                pWDS = reinterpret_cast<VkWriteDescriptorSet *>(pUpdate);
2857                if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
2858                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2859                        pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
2860                    }
2861                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
2862                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2863                        pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
2864                    }
2865                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2866                           pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
2867                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2868                        pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
2869                    }
2870                }
2871            }
2872        }
2873    }
2874}
2875
2876// Validate overall state at the time of a draw call
2877static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
2878                                           const VkPipelineBindPoint bindPoint) {
2879    bool result = false;
2880    auto const &state = pCB->lastBound[bindPoint];
2881    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2882    // First check flag states
2883    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2884        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2885
2886    // Now complete other state checks
2887    // TODO : Currently only performing next check if *something* was bound (non-zero last bound)
2888    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound
2889    //  We should have that check separately and then gate this check based on that check
2890    if (pPipe) {
2891        if (state.pipelineLayout) {
2892            string errorString;
2893            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2894            vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
2895            for (auto setBindingPair : pPipe->active_slots) {
2896                uint32_t setIndex = setBindingPair.first;
2897                // If valid set is not bound throw an error
2898                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2899                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2900                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2901                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
2902                                      (uint64_t)pPipe->pipeline, setIndex);
2903                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
2904                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
2905                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2906                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
2907                    result |= log_msg(
2908                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2909                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2910                        "VkDescriptorSet (%#" PRIxLEAST64
2911                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
2912                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
2913                } else { // Valid set is bound and layout compatible, validate that it's updated
2914                    // Pull the set node
2915                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
2916                    // Save vector of all active sets to verify dynamicOffsets below
2917                    // activeSetNodes.push_back(pSet);
2918                    activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second));
2919                    // Make sure set has been updated
2920                    if (!pSet->pUpdateStructs) {
2921                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2922                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
2923                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2924                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2925                                                              "this will result in undefined behavior.",
2926                                          (uint64_t)pSet->set);
2927                    }
2928                }
2929            }
2930            // For given active slots, verify any dynamic descriptors and record updated images & buffers
2931            result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2932        }
2933        if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {
2934            // Verify Vtx binding
2935            if (pPipe->vertexBindingDescriptions.size() > 0) {
2936                for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
2937                    if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2938                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2939                                          __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2940                                          "The Pipeline State Object (%#" PRIxLEAST64
2941                                          ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
2942                                          " should be set via vkCmdBindVertexBuffers.",
2943                                          (uint64_t)state.pipeline, i);
2944                    }
2945                }
2946            } else {
2947                if (!pCB->currentDrawData.buffers.empty()) {
2948                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
2949                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2950                                      "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
2951                                      ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
2952                                      (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2953                }
2954            }
2955            // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2956            // Skip check if rasterization is disabled or there is no viewport.
2957            if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
2958                 (pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2959                pPipe->graphicsPipelineCI.pViewportState) {
2960                bool dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
2961                bool dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
2962                if (dynViewport) {
2963                    if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
2964                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2965                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2966                                          "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2967                                          ", but PSO viewportCount is %u. These counts must match.",
2968                                          pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
2969                    }
2970                }
2971                if (dynScissor) {
2972                    if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
2973                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2974                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2975                                          "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2976                                          ", but PSO scissorCount is %u. These counts must match.",
2977                                          pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
2978                    }
2979                }
2980            }
2981        }
2982    }
2983    return result;
2984}
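// Example of the dynamic-state check above (hypothetical counts): a PSO built
// with pViewportState->viewportCount == 2 and VK_DYNAMIC_STATE_VIEWPORT, in a
// command buffer that only recorded vkCmdSetViewport(cb, 0, 1, &vp), leaves
// pCB->viewports.size() == 1 != 2 and reports DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH.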
2985
2986// Verify that create state for a pipeline is valid
2987static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> const &pPipelines,
2988                                      int pipelineIndex) {
2989    bool skipCall = false;
2990
2991    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
2992
2993    // If create derivative bit is set, check that we've specified a base
2994    // pipeline correctly, and that the base pipeline was created to allow
2995    // derivatives.
2996    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
2997        PIPELINE_NODE *pBasePipeline = nullptr;
2998        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
2999              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3000            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3001                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3002                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3003        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3004            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3005                skipCall |=
3006                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3007                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3008                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3009            } else {
3010                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3011            }
3012        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3013            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3014        }
3015
3016        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3017            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3018                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3019                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3020        }
3021    }
3022
3023    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3024        if (!my_data->phys_dev_properties.features.independentBlend) {
3025            if (pPipeline->attachments.size() > 1) {
3026                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3027                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3028                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3029                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3030                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3031                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3032                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3033                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3034                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3035                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3036                        skipCall |=
3037                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3038                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If the independent blend feature is not "
3039                            "enabled, all elements of pAttachments must be identical");
3040                    }
3041                }
3042            }
3043        }
3044        if (!my_data->phys_dev_properties.features.logicOp &&
3045            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3046            skipCall |=
3047                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3048                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3049                        "Invalid Pipeline CreateInfo: If the logic operations feature is not enabled, logicOpEnable must be VK_FALSE");
3050        }
3051        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3052            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3053             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3054            skipCall |=
3055                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3056                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3057                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3058        }
3059    }
3060
3061    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3062    // produces nonsense errors that confuse users. Other layers should already
3063    // emit errors for renderpass being invalid.
3064    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
3065    if (rp_data != my_data->renderPassMap.end() &&
3066        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
3067        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3068                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3069                                                                           "is out of range for this renderpass (0..%u)",
3070                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
3071    }
3072
3073    if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) {
3074        skipCall = true;
3075    }
3076    // Each shader's stage must be unique
3077    if (pPipeline->duplicate_shaders) {
3078        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3079            if (pPipeline->duplicate_shaders & stage) {
3080                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3081                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3082                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3083                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3084            }
3085        }
3086    }
3087    // VS is required
3088    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3089        skipCall |=
3090            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3091                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3092    }
3093    // Either both or neither TC/TE shaders should be defined
3094    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3095        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3096        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3097                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3098                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3099    }
3100    // Compute shaders should be specified independently of Gfx shaders
3101    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3102        (pPipeline->active_shaders &
3103         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3104          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3105        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3106                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3107                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3108    }
3109    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3110    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3111    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3112        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3113         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3114        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3115                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3116                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3117                                                                           "topology for tessellation pipelines");
3118    }
3119    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3120        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3121        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3122            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3123                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3124                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3125                                                                               "topology is only valid for tessellation pipelines");
3126        }
3127        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3128            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3129                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3130                                "Invalid Pipeline CreateInfo State: "
3131                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3132                                "topology used. pTessellationState must not be NULL in this case.");
3133        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3134                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
3135            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3136                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3137                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3138                                                                               "topology used with patchControlPoints value %u."
3139                                                                               " patchControlPoints should be >0 and <=32.",
3140                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3141        }
3142    }
3143    // Viewport state must be included if rasterization is enabled.
3144    // If the viewport state is included, the viewport and scissor counts should always match.
3145    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3146    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3147        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3148        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3149            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3150                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3151                                                                           "and scissors are dynamic, the PSO must include "
3152                                                                           "viewportCount and scissorCount in pViewportState.");
3153        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3154                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3155            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3156                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3157                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3158                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3159                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3160        } else {
3161            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3162            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3163            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3164            if (!dynViewport) {
3165                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3166                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3167                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3168                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3169                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3170                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3171                                        "vkCmdSetViewport().",
3172                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3173                }
3174            }
3175            if (!dynScissor) {
3176                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3177                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3178                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3179                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3180                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3181                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3182                                        "vkCmdSetScissor().",
3183                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3184                }
3185            }
3186        }
3187    }
3188    return skipCall;
3189}
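
// Illustrative application-side sketch (not part of this layer): even when
// viewport and scissor are listed in pDynamicState, the checks above require a
// pViewportState with matching non-zero counts; the data pointers may then be NULL.
//
//     VkPipelineViewportStateCreateInfo vpState = {};
//     vpState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
//     vpState.viewportCount = 1; // must equal scissorCount
//     vpState.scissorCount = 1;
//     vpState.pViewports = NULL; // OK only because viewport is dynamic (vkCmdSetViewport)
//     vpState.pScissors = NULL;  // OK only because scissor is dynamic (vkCmdSetScissor)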

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// For given pipeline, return number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
    if (pPipe->graphicsPipelineCI.pMultisampleState &&
        (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->graphicsPipelineCI.pMultisampleState->sType)) {
        return pPipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

// Validate state related to the PSO
static bool validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
                                  const VkPipeline pipeline) {
    bool skipCall = false;
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
        // Verify that any MSAA request in PSO matches sample# in bound FB
        // Skip the check if rasterization is disabled.
        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
            (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
            if (pCB->activeRenderPass) {
                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
                uint32_t i;

                const VkPipelineColorBlendStateCreateInfo *pColorBlendState = pPipeline->graphicsPipelineCI.pColorBlendState;
                if ((pColorBlendState != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
                    (pColorBlendState->attachmentCount != pSD->colorAttachmentCount)) {
                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                   reinterpret_cast<const uint64_t &>(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                   "Render pass subpass %u mismatch with blending state: blend state attachment count %u does not "
                                   "match subpass color attachment count %u! These must be the same.",
                                   pCB->activeSubpass, pColorBlendState->attachmentCount, pSD->colorAttachmentCount);
                }

                for (i = 0; i < pSD->colorAttachmentCount; i++) {
                    VkSampleCountFlagBits samples;

                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
                        continue;

                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
                        subpassNumSamples = samples;
                    } else if (subpassNumSamples != samples) {
                        subpassNumSamples = (VkSampleCountFlagBits)-1;
                        break;
                    }
                }
                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
                        subpassNumSamples = samples;
                    else if (subpassNumSamples != samples)
                        subpassNumSamples = (VkSampleCountFlagBits)-1;
                }

                if ((pSD->colorAttachmentCount > 0 || pSD->pDepthStencilAttachment) &&
                    psoNumSamples != subpassNumSamples) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                        (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                        "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
                                        ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
                                        (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
                }
            } else {
                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
                //   Verify and flag error as appropriate
            }
        }
        // TODO : Add more checks here
    } else {
        // TODO : Validate non-gfx pipeline updates
    }
    return skipCall;
}
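
// Illustrative sketch (not part of this layer): the check above passes only when
// the PSO's rasterizationSamples equals the sample count of every attachment used
// by the active subpass, e.g. a 4x pipeline rendering to 4x attachments:
//
//     VkPipelineMultisampleStateCreateInfo msState = {};
//     msState.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
//     msState.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT;
//     // ...and in the render pass:
//     VkAttachmentDescription colorAttachment = {};
//     colorAttachment.samples = VK_SAMPLE_COUNT_4_BIT;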

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return my_data->descriptorPoolMap[pool];
}

static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
        return NULL;
    }
    return my_data->descriptorSetLayoutMap[layout];
}

// Return false if the update struct is a valid type, otherwise flag an error and return the result of the callback
static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return false;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return getBindingStartIndex(pLayout, binding) + arrayIndex;
}

// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
}
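
// Worked example (assumed layout, for illustration only): if binding 0 holds 3
// descriptors and binding 1 holds 4, getBindingStartIndex() flattens them to
// overall indices 0-2 and 3-6. An update to binding 1 with arrayIndex 1 and a
// descriptorCount of 2 then covers:
//     startIndex = 3 + 1         = 4
//     endIndex   = 3 + 1 + 2 - 1 = 5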

// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skipCall = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    uint32_t i = 0;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return false;
    default:
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skipCall) {
        // Set first stageFlags as reference and verify that all other updates match it
        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
        for (i = startIndex; i <= endIndex; i++) {
            if (pLayout->descriptorTypes[i] != actualType) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
            }
            if (pLayout->stageFlags[i] != refStageFlags) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
                    refStageFlags, pLayout->stageFlags[i]);
            }
        }
    }
    return skipCall;
}

// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
//   struct into the pNewNode param. Return true if error condition encountered and callback signals early exit.
// NOTE : Calls to this function should be wrapped in mutex
static bool shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
    bool skipCall = false;
    VkWriteDescriptorSet *pWDS = NULL;
    VkCopyDescriptorSet *pCDS = NULL;
    switch (pUpdate->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        pWDS = new VkWriteDescriptorSet;
        *pNewNode = (GENERIC_HEADER *)pWDS;
        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));

        // Deep-copy the app-owned info array so the shadow copy stays valid after the call returns
        switch (pWDS->descriptorType) {
        case VK_DESCRIPTOR_TYPE_SAMPLER:
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
            pWDS->pImageInfo = info;
        } break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
            pWDS->pTexelBufferView = info;
        } break;
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
            pWDS->pBufferInfo = info;
        } break;
        default:
            return true;
        }
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        pCDS = new VkCopyDescriptorSet;
        *pNewNode = (GENERIC_HEADER *)pCDS;
        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
        break;
    default:
        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
            return true;
    }
    // Make sure that pNext for the end of shadow copy is NULL. Guard against the
    // default case above, where no shadow node was allocated.
    if (*pNewNode) {
        (*pNewNode)->pNext = NULL;
    }
    return skipCall;
}

// Verify that given sampler is valid
static bool validateSampler(const layer_data *my_data, const VkSampler *pSampler, const bool immutable) {
    bool skipCall = false;
    auto sampIt = my_data->sampleMap.find(*pSampler);
    if (sampIt == my_data->sampleMap.end()) {
        if (!immutable) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
                                (uint64_t)*pSampler);
        } else { // immutable
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
                                "sampler %#" PRIxLEAST64,
                                (uint64_t)*pSampler);
        }
    } else {
        // TODO : Any further checks we want to do on the sampler?
    }
    return skipCall;
}

// TODO: Consolidate functions
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}

bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}

// find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto imgIt = my_data->imageMap.find(image);
    if (imgIt == my_data->imageMap.end())
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. For now it will just ignore
    // potential errors in this case.
    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}
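
// Example of the per-aspect fan-out above (illustrative): a combined depth/stencil
// subresource is recorded as two separate map entries, one per aspect; the COLOR
// and METADATA calls are no-ops because those bits are absent from the mask.
//
//     VkImageSubresource sub = {VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, 0, 0};
//     SetLayout(pCB, image, sub, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
//     // -> one DEPTH entry and one STENCIL entry for mip 0 / layer 0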

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    // Dispatch directly to the ImageSubresourcePair overload for the whole-image entry
    SetLayout(pObject, imgpair, layout);
}

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto image_view_data = dev_data->imageViewMap.find(imageView);
    assert(image_view_data != dev_data->imageViewMap.end());
    const VkImage &image = image_view_data->second.image;
    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            SetLayout(pCB, image, sub, layout);
        }
    }
}

// Verify that given imageView is valid
static bool validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
    bool skipCall = false;
    auto ivIt = my_data->imageViewMap.find(*pImageView);
    if (ivIt == my_data->imageViewMap.end()) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
                            (uint64_t)*pImageView);
    } else {
        // Validate that imageLayout is compatible with aspectMask and image format
        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
        VkImage image = ivIt->second.image;
        // TODO : Check here in case we have a bad image
        VkFormat format = VK_FORMAT_MAX_ENUM;
        auto imgIt = my_data->imageMap.find(image);
        if (imgIt != my_data->imageMap.end()) {
            format = (*imgIt).second.createInfo.format;
        } else {
            // Also need to check the swapchains.
            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
                VkSwapchainKHR swapchain = swapchainIt->second;
                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
                    format = pswapchain_node->createInfo.imageFormat;
                }
            }
        }
        if (format == VK_FORMAT_MAX_ENUM) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
                                " in imageView %#" PRIxLEAST64,
                                (uint64_t)image, (uint64_t)*pImageView);
        } else {
            bool ds = vk_format_is_depth_or_stencil(format);
            switch (imageLayout) {
            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                // Only Color bit must be set
                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
                                "and imageView %#" PRIxLEAST64
                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
                                (uint64_t)*pImageView);
                }
                // format must NOT be DS
                if (ds) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
                                "and imageView %#" PRIxLEAST64
                                " but the image format is %s which is not a color format.",
                                (uint64_t)*pImageView, string_VkFormat(format));
                }
                break;
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                // Depth or stencil bit must be set, but both must NOT be set
                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
                        // both must NOT be set
                        skipCall |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64
                                    " that has both STENCIL and DEPTH aspects set",
                                    (uint64_t)*pImageView);
                    }
                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                    // Neither was set
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64
                                " that does not have STENCIL or DEPTH aspect set.",
                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
                }
                // format must be DS
                if (!ds) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64
                                " but the image format is %s which is not a depth/stencil format.",
                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
                }
                break;
            default:
                // anything to check for other layouts?
                break;
            }
        }
    }
    return skipCall;
}

// Verify that given bufferView is valid
static bool validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
    bool skipCall = false;
    auto sampIt = my_data->bufferViewMap.find(*pBufferView);
    if (sampIt == my_data->bufferViewMap.end()) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
                            (uint64_t)*pBufferView);
    } else {
        // TODO : Any further checks we want to do on the bufferView?
    }
    return skipCall;
}

// Verify that given bufferInfo is valid
static bool validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
    bool skipCall = false;
    auto sampIt = my_data->bufferMap.find(pBufferInfo->buffer);
    if (sampIt == my_data->bufferMap.end()) {
        skipCall |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
                    (uint64_t)pBufferInfo->buffer);
    } else {
        // TODO : Any further checks we want to do on the buffer?
    }
    return skipCall;
}

static bool validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
                                   const VkDescriptorSetLayoutBinding *pLayoutBinding) {
    bool skipCall = false;
    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
    const VkSampler *pSampler = NULL;
    bool immutable = false;
    uint32_t i = 0;
    // For given update type, verify that update contents are correct
    switch (pWDS->descriptorType) {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
        }
        break;
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            if (NULL == pLayoutBinding->pImmutableSamplers) {
                pSampler = &(pWDS->pImageInfo[i].sampler);
                if (immutable) {
                    skipCall |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
                        ", but previous update(s) from this "
                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
                        "use immutable or non-immutable samplers.",
                        i, (uint64_t)*pSampler);
                }
            } else {
                if (i > 0 && !immutable) {
                    skipCall |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
                        "use immutable or non-immutable samplers.",
                        i);
                }
                immutable = true;
                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
            }
            skipCall |= validateSampler(my_data, pSampler, immutable);
        }
    // Intentionally fall through here to also validate image stuff
    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
        }
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
        }
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
        }
        break;
    default:
        break;
    }
    return skipCall;
}
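
// Illustrative application-side write that exercises the image-info path above
// (sampler/imageView/descriptorSet are hypothetical handles):
//
//     VkDescriptorImageInfo imageInfo = {sampler, imageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
//     VkWriteDescriptorSet wds = {};
//     wds.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     wds.dstSet = descriptorSet;
//     wds.dstBinding = 0;
//     wds.descriptorCount = 1;
//     wds.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
//     wds.pImageInfo = &imageInfo; // checked by validateSampler()/validateImageView()
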
// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
    bool skip_call = false;
    auto set_node = my_data->setMap.find(set);
    if (set_node == my_data->setMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        if (set_node->second->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
                                 func_str.c_str(), (uint64_t)(set));
        }
    }
    return skip_call;
}
static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
    // Flag any CBs this set is bound to as INVALID
    for (auto cb : pSet->boundCmdBuffers) {
        auto cb_node = dev_data->commandBufferMap.find(cb);
        if (cb_node != dev_data->commandBufferMap.end()) {
            cb_node->second->state = CB_INVALID;
        }
    }
}
// update DS mappings based on write and copy update arrays
static bool dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
    bool skipCall = false;

    LAYOUT_NODE *pLayout = NULL;
    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
    // Validate Write updates
    uint32_t i = 0;
    for (i = 0; i < descriptorWriteCount; i++) {
        VkDescriptorSet ds = pWDS[i].dstSet;
        SET_NODE *pSet = my_data->setMap[ds];
        // Set being updated cannot be in-flight
        if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == true)
            return skipCall;
        // If set is bound to any cmdBuffers, mark them invalid
        invalidateBoundCmdBuffers(my_data, pSet);
        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
        pLayout = pSet->pLayout;
        // First verify valid update struct
        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == true) {
            break;
        }
        uint32_t binding = 0, endIndex = 0;
        binding = pWDS[i].dstBinding;
        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
        // Make sure that layout being updated has the binding being updated
        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                                "Descriptor Set %#" PRIxLEAST64 " does not have a binding to match "
                                "update binding %u for update type %s!",
                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
        } else {
            // Next verify that update falls within size of given binding
            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
            if (getBindingEndIndex(pLayout, binding) < endIndex) {
                pLayoutCI = &pLayout->createInfo;
                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
            } else { // TODO : should we skip update on a type mismatch or force it?
                uint32_t startIndex;
                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
                // Layout bindings match w/ update, now verify that update type
                // & stageFlags are the same for entire update
                if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == false) {
                    // The update is within bounds and consistent, but need to
                    // make sure contents make sense as well
                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
                                                           &pLayout->createInfo.pBindings[bindingToIndex->second])) == false) {
                        // Update is good. Save the update info
                        // Create new update struct for this set's shadow copy
                        GENERIC_HEADER *pNewNode = NULL;
                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
                        if (NULL == pNewNode) {
                            skipCall |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
                        } else {
                            // Insert shadow node into LL of updates for this set
                            pNewNode->pNext = pSet->pUpdateStructs;
                            pSet->pUpdateStructs = pNewNode;
                            // Now update appropriate descriptor(s) to point to new Update node
                            for (uint32_t j = startIndex; j <= endIndex; j++) {
                                assert(j < pSet->descriptorCount);
                                pSet->pDescriptorUpdates[j] = pNewNode;
                            }
                        }
                    }
                }
            }
        }
    }
    // Now validate copy updates
    for (i = 0; i < descriptorCopyCount; ++i) {
        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
        // For each copy make sure that update falls within given layout and that types match
        pSrcSet = my_data->setMap[pCDS[i].srcSet];
        pDstSet = my_data->setMap[pCDS[i].dstSet];
        // Set being updated cannot be in-flight
        if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == true)
            return skipCall;
        invalidateBoundCmdBuffers(my_data, pDstSet);
        pSrcLayout = pSrcSet->pLayout;
        pDstLayout = pDstSet->pLayout;
        // Validate that src binding is valid for src set layout
        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                                "Copy descriptor update %u has srcBinding %u "
                                "which is out of bounds for underlying SetLayout "
                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                                "Copy descriptor update %u has dstBinding %u "
                                "which is out of bounds for underlying SetLayout "
                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
        } else {
            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
                                            (const GENERIC_HEADER *)&(pCDS[i]));
            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
                                            (const GENERIC_HEADER *)&(pCDS[i]));
            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
                pLayoutCI = &pSrcLayout->createInfo;
                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
                            pCDS[i].srcBinding, DSstr.c_str());
            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
                pLayoutCI = &pDstLayout->createInfo;
                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
                            pCDS[i].dstBinding, DSstr.c_str());
            } else {
                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
                                                    (const GENERIC_HEADER *)&(pCDS[i]));
                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
                                                    (const GENERIC_HEADER *)&(pCDS[i]));
                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
                    // For copy just make sure that the types match and then perform the update
                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
                                            "that does not match overlapping dest descriptor type of %s!",
                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
                    } else {
                        // point dst descriptor at corresponding src descriptor
                        // TODO : This may be a hole. I believe copy should be its own copy,
                        //  otherwise a subsequent write update to src will incorrectly affect the copy
                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
                    }
                }
            }
        }
    }
    return skipCall;
}

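// Illustrative call that presumably routes to dsUpdate() via the layer's
// vkUpdateDescriptorSets intercept, with one write and one copy (all handles
// hypothetical; the write sketch appears before validateIdleDescriptorSet above):
//
//     VkCopyDescriptorSet copy = {};
//     copy.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
//     copy.srcSet = setA;  copy.srcBinding = 0;  copy.srcArrayElement = 0;
//     copy.dstSet = setB;  copy.dstBinding = 0;  copy.dstArrayElement = 0;
//     copy.descriptorCount = 1;
//     vkUpdateDescriptorSets(device, 1, &wds, 1, &copy);
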
// Verify that given pool has descriptors that are being requested for allocation.
// NOTE : Calls to this function should be wrapped in mutex
static bool validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
                                                     const VkDescriptorSetLayout *pSetLayouts) {
    bool skipCall = false;
    uint32_t i = 0;
    uint32_t j = 0;

    // Track number of descriptorSets allowable in this pool
    if (pPoolNode->availableSets < count) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
                            ". This pool only has %u descriptorSets remaining.",
                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
    } else {
        pPoolNode->availableSets -= count;
    }

    for (i = 0; i < count; ++i) {
        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
        if (NULL == pLayout) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                        (uint64_t)pSetLayouts[i]);
        } else {
            uint32_t typeIndex = 0, poolSizeCount = 0;
            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
                                        ". This pool only has %u descriptors of this type remaining.",
                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
                } else { // Decrement available descriptors of this type
                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
                }
            }
        }
    }
    return skipCall;
}
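
// Illustrative pool creation (hypothetical sizes) that satisfies the availability
// checks above for two sets, each using one combined image sampler:
//
//     VkDescriptorPoolSize poolSize = {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 2};
//     VkDescriptorPoolCreateInfo poolCI = {};
//     poolCI.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
//     poolCI.maxSets = 2;        // tracked above as availableSets
//     poolCI.poolSizeCount = 1;  // tracked per-type as availableDescriptorTypeCount
//     poolCI.pPoolSizes = &poolSize;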
4091
4092// Free the shadowed update node for this Set
4093// NOTE : Calls to this function should be wrapped in mutex
4094static void freeShadowUpdateTree(SET_NODE *pSet) {
4095    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4096    pSet->pUpdateStructs = NULL;
4097    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4098    // Clear the descriptor mappings as they will now be invalid
4099    pSet->pDescriptorUpdates.clear();
4100    while (pShadowUpdate) {
4101        pFreeUpdate = pShadowUpdate;
4102        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4103        VkWriteDescriptorSet *pWDS = NULL;
4104        switch (pFreeUpdate->sType) {
4105        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4106            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4107            switch (pWDS->descriptorType) {
4108            case VK_DESCRIPTOR_TYPE_SAMPLER:
4109            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4110            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4111            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4112                delete[] pWDS->pImageInfo;
4113            } break;
4114            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4115            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4116                delete[] pWDS->pTexelBufferView;
4117            } break;
4118            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4119            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4120            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4121            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4122                delete[] pWDS->pBufferInfo;
4123            } break;
4124            default:
4125                break;
4126            }
4127            break;
4128        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4129            break;
4130        default:
4131            assert(0);
4132            break;
4133        }
4134        delete pFreeUpdate;
4135    }
4136}
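
// The shadow tree freed above is a singly-linked chain of deep-copied
// VkWriteDescriptorSet/VkCopyDescriptorSet structs threaded through pNext; the
// per-type payload arrays (pImageInfo, pTexelBufferView, pBufferInfo) are
// delete[]'d individually because they were array-copied when the update was
// originally shadowed.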
4137
4138// Free all DS Pools including their Sets & related sub-structs
4139// NOTE : Calls to this function should be wrapped in mutex
4140static void deletePools(layer_data *my_data) {
4141    if (my_data->descriptorPoolMap.empty())
4142        return;
4143    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4144        SET_NODE *pSet = (*ii).second->pSets;
4145        SET_NODE *pFreeSet = pSet;
4146        while (pSet) {
4147            pFreeSet = pSet;
4148            pSet = pSet->pNext;
4149            // Freeing layouts handled in deleteLayouts() function
4150            // Free Update shadow struct tree
4151            freeShadowUpdateTree(pFreeSet);
4152            delete pFreeSet;
4153        }
4154        delete (*ii).second;
4155    }
4156    my_data->descriptorPoolMap.clear();
4157}
4158
4159// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
4160// NOTE : Calls to this function should be wrapped in mutex
4161static void deleteLayouts(layer_data *my_data) {
4162    if (my_data->descriptorSetLayoutMap.empty())
4163        return;
4164    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
4165        LAYOUT_NODE *pLayout = (*ii).second;
4166        if (pLayout->createInfo.pBindings) {
4167            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
4168                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
4169            }
4170            delete[] pLayout->createInfo.pBindings;
4171        }
4172        delete pLayout;
4173    }
4174    my_data->descriptorSetLayoutMap.clear();
4175}
4176
4177// Currently, clearing a set removes all previous updates to that set
4178//  TODO : Validate if this is correct clearing behavior
4179static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4180    SET_NODE *pSet = getSetNode(my_data, set);
4181    if (!pSet) {
4182        // TODO : Return error
4183    } else {
4184        freeShadowUpdateTree(pSet);
4185    }
4186}
4187
4188static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4189                                VkDescriptorPoolResetFlags flags) {
4190    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4191    if (!pPool) {
4192        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4193                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4194                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4195    } else {
4196        // TODO: validate flags
4197        // For every set allocated from this pool, clear it, remove it from setMap, and free its SET_NODE
4198        SET_NODE *pSet = pPool->pSets;
4199        SET_NODE *pFreeSet = pSet;
4200        while (pSet) {
4201            clearDescriptorSet(my_data, pSet->set);
4202            my_data->setMap.erase(pSet->set);
4203            pFreeSet = pSet;
4204            pSet = pSet->pNext;
4205            delete pFreeSet;
4206        }
4207        pPool->pSets = nullptr;
4208        // Reset available count for each type and available sets for this pool
4209        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4210            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4211        }
4212        pPool->availableSets = pPool->maxSets;
4213    }
4214}
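
// App-side flow this models (a sketch; handle names assumed):
//     vkResetDescriptorPool(device, pool, 0);
//     // All VkDescriptorSet handles allocated from 'pool' are now invalid and
//     // the pool's full set/descriptor capacity is available again.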
4215
4216// For given CB object, fetch associated CB Node from map
4217static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4218    if (my_data->commandBufferMap.count(cb) == 0) {
4219        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4220                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4221                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
4222        return NULL;
4223    }
4224    return my_data->commandBufferMap[cb];
4225}
4226
4227// Free all CB Nodes
4228// NOTE : Calls to this function should be wrapped in mutex
4229static void deleteCommandBuffers(layer_data *my_data) {
4230    if (my_data->commandBufferMap.empty()) {
4231        return;
4232    }
4233    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4234        delete (*ii).second;
4235    }
4236    my_data->commandBufferMap.clear();
4237}
4238
4239static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4240    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4241                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4242                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4243}
4244
4245bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4246    if (!pCB->activeRenderPass)
4247        return false;
4248    bool skip_call = false;
4249    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4250        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4251                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4252                             "Commands cannot be called in a subpass using secondary command buffers.");
4253    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4254        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4255                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4256                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4257    }
4258    return skip_call;
4259}
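
// Example of the rule enforced above: after
//     vkCmdBeginRenderPass(cb, &rpBegin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
// the only work that may be recorded in that subpass is vkCmdExecuteCommands()
// (aside from advancing or ending the render pass); with
// VK_SUBPASS_CONTENTS_INLINE the opposite holds and vkCmdExecuteCommands() is
// disallowed. ('rpBegin' is a placeholder for the VkRenderPassBeginInfo.)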
4260
4261static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4262    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4263        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4264                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4265                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4266    return false;
4267}
4268
4269static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4270    if (!(flags & VK_QUEUE_COMPUTE_BIT))
4271        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4272                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4273                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4274    return false;
4275}
4276
4277static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4278    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4279        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4280                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4281                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4282    return false;
4283}
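
// For example, a command buffer whose pool targets a transfer-only queue family
// (VK_QUEUE_TRANSFER_BIT only) fails all three checks above, so recording
// vkCmdDraw() into it is flagged by checkGraphicsBit(). Pure copy commands are
// not gated here because graphics- and compute-capable families implicitly
// support transfer operations.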
4284
4285// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4286//  in the recording state or if there's an issue with the Cmd ordering
4287static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4288    bool skipCall = false;
4289    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4290    if (pool_data != my_data->commandPoolMap.end()) {
4291        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4292        switch (cmd) {
4293        case CMD_BINDPIPELINE:
4294        case CMD_BINDPIPELINEDELTA:
4295        case CMD_BINDDESCRIPTORSETS:
4296        case CMD_FILLBUFFER:
4297        case CMD_CLEARCOLORIMAGE:
4298        case CMD_SETEVENT:
4299        case CMD_RESETEVENT:
4300        case CMD_WAITEVENTS:
4301        case CMD_BEGINQUERY:
4302        case CMD_ENDQUERY:
4303        case CMD_RESETQUERYPOOL:
4304        case CMD_COPYQUERYPOOLRESULTS:
4305        case CMD_WRITETIMESTAMP:
4306            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4307            break;
4308        case CMD_SETVIEWPORTSTATE:
4309        case CMD_SETSCISSORSTATE:
4310        case CMD_SETLINEWIDTHSTATE:
4311        case CMD_SETDEPTHBIASSTATE:
4312        case CMD_SETBLENDSTATE:
4313        case CMD_SETDEPTHBOUNDSSTATE:
4314        case CMD_SETSTENCILREADMASKSTATE:
4315        case CMD_SETSTENCILWRITEMASKSTATE:
4316        case CMD_SETSTENCILREFERENCESTATE:
4317        case CMD_BINDINDEXBUFFER:
4318        case CMD_BINDVERTEXBUFFER:
4319        case CMD_DRAW:
4320        case CMD_DRAWINDEXED:
4321        case CMD_DRAWINDIRECT:
4322        case CMD_DRAWINDEXEDINDIRECT:
4323        case CMD_BLITIMAGE:
4324        case CMD_CLEARATTACHMENTS:
4325        case CMD_CLEARDEPTHSTENCILIMAGE:
4326        case CMD_RESOLVEIMAGE:
4327        case CMD_BEGINRENDERPASS:
4328        case CMD_NEXTSUBPASS:
4329        case CMD_ENDRENDERPASS:
4330            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4331            break;
4332        case CMD_DISPATCH:
4333        case CMD_DISPATCHINDIRECT:
4334            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4335            break;
4336        case CMD_COPYBUFFER:
4337        case CMD_COPYIMAGE:
4338        case CMD_COPYBUFFERTOIMAGE:
4339        case CMD_COPYIMAGETOBUFFER:
4340        case CMD_CLONEIMAGEDATA:
4341        case CMD_UPDATEBUFFER:
4342        case CMD_PIPELINEBARRIER:
4343        case CMD_EXECUTECOMMANDS:
4344            break;
4345        default:
4346            break;
4347        }
4348    }
4349    if (pCB->state != CB_RECORDING) {
4350        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
4351    } else {
4352        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
4353        CMD_NODE cmdNode = {}; // Init cmd node and append it to the end of the CB's cmd list
4354        cmdNode.cmdNumber = ++pCB->numCmds;
4355        cmdNode.type = cmd;
4356        pCB->cmds.push_back(cmdNode);
4357    }
4358    return skipCall;
4359}
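
// Hypothetical call site (names assumed for illustration): a vkCmdDraw() entry
// point would record
//     skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
// so the command is capability-checked against the pool's queue family and, if
// the CB is recording, appended to pCB->cmds for later diagnostics.
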
4360// Reset the command buffer state
4361//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4362static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
4363    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
4364    if (pCB) {
4365        pCB->cmds.clear();
4366        // Reset CB state (note that createInfo is not cleared)
4367        pCB->commandBuffer = cb;
4368        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4369        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4370        pCB->numCmds = 0;
4371        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4372        pCB->state = CB_NEW;
4373        pCB->submitCount = 0;
4374        pCB->status = 0;
4375        pCB->viewports.clear();
4376        pCB->scissors.clear();
4377        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4378            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
4379            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4380                auto set_node = dev_data->setMap.find(set);
4381                if (set_node != dev_data->setMap.end()) {
4382                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
4383                }
4384            }
4385            pCB->lastBound[i].reset();
4386        }
4387        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4388        pCB->activeRenderPass = 0;
4389        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4390        pCB->activeSubpass = 0;
4391        pCB->framebuffer = 0;
4392        pCB->fenceId = 0;
4393        pCB->lastSubmittedFence = VK_NULL_HANDLE;
4394        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
4395        pCB->destroyedSets.clear();
4396        pCB->updatedSets.clear();
4397        pCB->destroyedFramebuffers.clear();
4398        pCB->waitedEvents.clear();
4399        pCB->semaphores.clear();
4400        pCB->events.clear();
4401        pCB->waitedEventsBeforeQueryReset.clear();
4402        pCB->queryToStateMap.clear();
4403        pCB->activeQueries.clear();
4404        pCB->startedQueries.clear();
4405        pCB->imageLayoutMap.clear();
4406        pCB->eventToStageMap.clear();
4407        pCB->drawData.clear();
4408        pCB->currentDrawData.buffers.clear();
4409        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4410        pCB->secondaryCommandBuffers.clear();
4411        pCB->updateImages.clear();
4412        pCB->updateBuffers.clear();
4413        clear_cmd_buf_and_mem_references(dev_data, pCB);
4414        pCB->eventUpdates.clear();
4415    }
4416}
4417
4418// Set PSO-related status bits for CB, including dynamic state set via PSO
4419static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4420    // Account for any dynamic state not set via this PSO
4421    if (!pPipe->graphicsPipelineCI.pDynamicState ||
4422        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
4423        pCB->status = CBSTATUS_ALL;
4424    } else {
4425        // First consider all state on
4426        // Then unset any state that's noted as dynamic in PSO
4427        // Finally OR that into CB statemask
4428        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4429        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
4430            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
4431            case VK_DYNAMIC_STATE_VIEWPORT:
4432                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4433                break;
4434            case VK_DYNAMIC_STATE_SCISSOR:
4435                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4436                break;
4437            case VK_DYNAMIC_STATE_LINE_WIDTH:
4438                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4439                break;
4440            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4441                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4442                break;
4443            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4444                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4445                break;
4446            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4447                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4448                break;
4449            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4450                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4451                break;
4452            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4453                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4454                break;
4455            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4456                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4457                break;
4458            default:
4459                // TODO : Flag error here
4460                break;
4461            }
4462        }
4463        pCB->status |= psoDynStateMask;
4464    }
4465}
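
// Example: binding a pipeline whose pDynamicState lists only
// VK_DYNAMIC_STATE_VIEWPORT sets every status bit except CBSTATUS_VIEWPORT_SET,
// so a subsequent draw is flagged unless vkCmdSetViewport() was also recorded;
// all other state is treated as statically baked into the PSO.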
4466
4467// Print the last bound Gfx Pipeline
4468static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4469    bool skipCall = false;
4470    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4471    if (pCB) {
4472        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
4473        if (!pPipeTrav) {
4474            // nothing to print
4475        } else {
4476            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4477                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
4478                                vk_print_vkgraphicspipelinecreateinfo(
4479                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
4480                                    .c_str());
4481        }
4482    }
4483    return skipCall;
4484}
4485
4486static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4487    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4488    if (pCB && pCB->cmds.size() > 0) {
4489        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4490                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
4491        vector<CMD_NODE> cmds = pCB->cmds;
4492        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4493            // TODO : Need to pass cb as srcObj here
4494            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4495                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4496        }
4497    } else {
4498        // Nothing to print
4499    }
4500}
4501
4502static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4503    bool skipCall = false;
4504    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4505        return skipCall;
4506    }
4507    skipCall |= printPipeline(my_data, cb);
4508    return skipCall;
4509}
4510
4511// Flags validation error if the associated call is made inside a render pass. The apiName
4512// routine should ONLY be called outside a render pass.
4513static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4514    bool inside = false;
4515    if (pCB->activeRenderPass) {
4516        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4517                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4518                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4519                         (uint64_t)pCB->activeRenderPass);
4520    }
4521    return inside;
4522}
4523
4524// Flags validation error if the associated call is made outside a render pass. The apiName
4525// routine should ONLY be called inside a render pass.
4526static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4527    bool outside = false;
4528    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4529        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4530         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4531        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4532                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4533                          "%s: This call must be issued inside an active render pass.", apiName);
4534    }
4535    return outside;
4536}
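
// E.g. vkCmdCopyBuffer() is checked with insideRenderPass() (it must be recorded
// outside any render pass), while vkCmdDraw() is checked with outsideRenderPass()
// (it must be recorded inside one). Secondary CBs begun with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT are exempted above because
// they inherit the render pass state of the calling primary CB.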
4537
4538static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
4539
4540    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
4541
4542    if (!globalLockInitialized) {
4543        loader_platform_thread_create_mutex(&globalLock);
4544        globalLockInitialized = 1;
4545    }
4546}
4547
4548VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4549vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4550    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4551
4552    assert(chain_info->u.pLayerInfo);
4553    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4554    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4555    if (fpCreateInstance == NULL)
4556        return VK_ERROR_INITIALIZATION_FAILED;
4557
4558    // Advance the link info for the next element on the chain
4559    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4560
4561    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4562    if (result != VK_SUCCESS)
4563        return result;
4564
4565    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4566    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4567    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
4568
4569    instance_data->report_data =
4570        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
4571                                     pCreateInfo->ppEnabledExtensionNames);
4572
4573    init_core_validation(instance_data, pAllocator);
4574
4575    ValidateLayerOrdering(*pCreateInfo);
4576
4577    return result;
4578}
4579
4580/* hook DestroyInstance to remove tableInstanceMap entry */
4581VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4582    // TODOSC : Shouldn't need any customization here
4583    dispatch_key key = get_dispatch_key(instance);
4584    // TBD: Need any locking this early, in case this function is called at the
4585    // same time by more than one thread?
4586    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4587    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4588    pTable->DestroyInstance(instance, pAllocator);
4589
4590    loader_platform_thread_lock_mutex(&globalLock);
4591    // Clean up logging callback, if any
4592    while (my_data->logging_callback.size() > 0) {
4593        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4594        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4595        my_data->logging_callback.pop_back();
4596    }
4597
4598    layer_debug_report_destroy_instance(my_data->report_data);
4599    delete my_data->instance_dispatch_table;
4600    layer_data_map.erase(key);
4601    loader_platform_thread_unlock_mutex(&globalLock);
4602    if (layer_data_map.empty()) {
4603        // Release mutex when destroying last instance.
4604        loader_platform_thread_delete_mutex(&globalLock);
4605        globalLockInitialized = 0;
4606    }
4607}
4608
4609static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4610    uint32_t i;
4611    // TBD: Need any locking, in case this function is called at the same time
4612    // by more than one thread?
4613    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4614    dev_data->device_extensions.wsi_enabled = false;
4615
4616    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4617    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4618    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4619    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4620    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4621    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4622    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4623
4624    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4625        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4626            dev_data->device_extensions.wsi_enabled = true;
4627    }
4628}
4629
4630VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4631                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4632    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4633
4634    assert(chain_info->u.pLayerInfo);
4635    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4636    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4637    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
4638    if (fpCreateDevice == NULL) {
4639        return VK_ERROR_INITIALIZATION_FAILED;
4640    }
4641
4642    // Advance the link info for the next element on the chain
4643    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4644
4645    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4646    if (result != VK_SUCCESS) {
4647        return result;
4648    }
4649
4650    loader_platform_thread_lock_mutex(&globalLock);
4651    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4652    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4653
4654    // Setup device dispatch table
4655    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4656    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4657    my_device_data->device = *pDevice;
4658
4659    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4660    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4661    // Get physical device limits for this device
4662    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4663    uint32_t count;
4664    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4665    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4666    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4667        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4668    // TODO: device limits should make sure these are compatible
4669    if (pCreateInfo->pEnabledFeatures) {
4670        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
4671    } else {
4672        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4673    }
4674    // Store physical device mem limits into device layer_data struct
4675    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4676    loader_platform_thread_unlock_mutex(&globalLock);
4677
4678    ValidateLayerOrdering(*pCreateInfo);
4679
4680    return result;
4681}
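
// Layer-chaining note: VK_LAYER_LINK_INFO supplies the next layer's
// GetInstanceProcAddr/GetDeviceProcAddr, and pLayerInfo is advanced before
// calling down so the layer below sees its own link info; vkCreateInstance()
// above follows the same pattern.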
4682
4683// prototype
4684static void deleteRenderPasses(layer_data *);
4685VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4686    // TODOSC : Shouldn't need any customization here
4687    dispatch_key key = get_dispatch_key(device);
4688    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4689    // Free all the memory
4690    loader_platform_thread_lock_mutex(&globalLock);
4691    deletePipelines(dev_data);
4692    deleteRenderPasses(dev_data);
4693    deleteCommandBuffers(dev_data);
4694    deletePools(dev_data);
4695    deleteLayouts(dev_data);
4696    dev_data->imageViewMap.clear();
4697    dev_data->imageMap.clear();
4698    dev_data->imageSubresourceMap.clear();
4699    dev_data->imageLayoutMap.clear();
4700    dev_data->bufferViewMap.clear();
4701    dev_data->bufferMap.clear();
4702    // Queues persist until device is destroyed
4703    dev_data->queueMap.clear();
4704    loader_platform_thread_unlock_mutex(&globalLock);
4705#if MTMERGESOURCE
4706    bool skipCall = false;
4707    loader_platform_thread_lock_mutex(&globalLock);
4708    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4709            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4710    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4711            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4712    print_mem_list(dev_data);
4713    printCBList(dev_data);
4714    // Report any memory leaks
4715    DEVICE_MEM_INFO *pInfo = NULL;
4716    if (!dev_data->memObjMap.empty()) {
4717        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4718            pInfo = &(*ii).second;
4719            if (pInfo->allocInfo.allocationSize != 0) {
4720                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4721                skipCall |=
4722                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4723                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4724                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
4725                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
4726                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4727            }
4728        }
4729    }
4730    layer_debug_report_destroy_device(device);
4731    loader_platform_thread_unlock_mutex(&globalLock);
4732
4733#if DISPATCH_MAP_DEBUG
4734    fprintf(stderr, "Device: %p, key: %p\n", device, key);
4735#endif
4736    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4737    if (!skipCall) {
4738        pDisp->DestroyDevice(device, pAllocator);
4739    }
4740#else
4741    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4742#endif
4743    delete dev_data->device_dispatch_table;
4744    layer_data_map.erase(key);
4745}
4746
4747static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4748
4749VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4750vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
4751    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
4752}
4753
4754VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4755vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
4756    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
4757}
4758
4759// TODO: Why does this exist - can we just use global?
4760static const VkLayerProperties cv_device_layers[] = {{
4761    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
4762}};
4763
4764VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
4765                                                                                    const char *pLayerName, uint32_t *pCount,
4766                                                                                    VkExtensionProperties *pProperties) {
4767    if (pLayerName == NULL) {
4768        dispatch_key key = get_dispatch_key(physicalDevice);
4769        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4770        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
4771    } else {
4772        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
4773    }
4774}
4775
4776VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4777vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
4778    /* draw_state physical device layers are the same as global */
4779    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
4780}
4781
4782// Validate that, for each image used by this command buffer, the initial layout
4783// recorded at first use matches the image's globally tracked layout at submit
4784// time; on success the global layout is updated to the CB's final layout
4785static bool ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
4786    bool skip_call = false;
4787    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
4788    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
4789    for (auto cb_image_data : pCB->imageLayoutMap) {
4790        VkImageLayout imageLayout;
4791        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4792            skip_call |=
4793                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4794                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
4795                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4796        } else {
4797            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4798                // TODO: Set memory invalid which is in mem_tracker currently
4799            } else if (imageLayout != cb_image_data.second.initialLayout) {
4800                if (cb_image_data.first.hasSubresource) {
4801                    skip_call |= log_msg(
4802                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4803                        reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4804                        "Cannot submit cmd buffer using image (%" PRIx64 ") [sub-resource: array layer %u, mip level %u], "
4805                        "with layout %s when first use is %s.",
4806                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.arrayLayer,
4807                        cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4808                        string_VkImageLayout(cb_image_data.second.initialLayout));
4809                } else {
4810                    skip_call |= log_msg(
4811                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4812                        reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4813                        "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when "
4814                        "first use is %s.",
4815                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4816                        string_VkImageLayout(cb_image_data.second.initialLayout));
4817                }
4818            }
4819            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4820        }
4821    }
4822    return skip_call;
4823}
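
// Example failure this catches (layouts assumed for illustration): a previously
// submitted CB leaves an image in VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, but
// the CB being submitted recorded its first use of that image expecting
// VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; the tracked global layout no longer
// matches the recorded initial layout, so the submit is flagged.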
4824
4825// Track which resources are in-flight by atomically incrementing their "in_use" count
4826static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4827    bool skip_call = false;
4828    for (auto drawDataElement : pCB->drawData) {
4829        for (auto buffer : drawDataElement.buffers) {
4830            auto buffer_data = my_data->bufferMap.find(buffer);
4831            if (buffer_data == my_data->bufferMap.end()) {
4832                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4833                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4834                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
4835            } else {
4836                buffer_data->second.in_use.fetch_add(1);
4837            }
4838        }
4839    }
4840    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4841        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4842            auto setNode = my_data->setMap.find(set);
4843            if (setNode == my_data->setMap.end()) {
4844                skip_call |=
4845                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4846                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4847                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
4848            } else {
4849                setNode->second->in_use.fetch_add(1);
4850            }
4851        }
4852    }
4853    for (auto semaphore : pCB->semaphores) {
4854        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4855        if (semaphoreNode == my_data->semaphoreMap.end()) {
4856            skip_call |=
4857                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4858                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4859                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
4860        } else {
4861            semaphoreNode->second.in_use.fetch_add(1);
4862        }
4863    }
4864    for (auto event : pCB->events) {
4865        auto eventNode = my_data->eventMap.find(event);
4866        if (eventNode == my_data->eventMap.end()) {
4867            skip_call |=
4868                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4869                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4870                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
4871        } else {
4872            eventNode->second.in_use.fetch_add(1);
4873        }
4874    }
4875    return skip_call;
4876}
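
// The fetch_add(1) calls above are balanced by the fetch_sub(1) calls in the
// decrementResources() overloads below, which run once a fence associated with
// the submission is known to have signaled; an object with a nonzero in_use
// count is considered in flight and unsafe to free or destroy.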
4877
4878static void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4879    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4880    for (auto drawDataElement : pCB->drawData) {
4881        for (auto buffer : drawDataElement.buffers) {
4882            auto buffer_data = my_data->bufferMap.find(buffer);
4883            if (buffer_data != my_data->bufferMap.end()) {
4884                buffer_data->second.in_use.fetch_sub(1);
4885            }
4886        }
4887    }
4888    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4889        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4890            auto setNode = my_data->setMap.find(set);
4891            if (setNode != my_data->setMap.end()) {
4892                setNode->second->in_use.fetch_sub(1);
4893            }
4894        }
4895    }
4896    for (auto semaphore : pCB->semaphores) {
4897        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4898        if (semaphoreNode != my_data->semaphoreMap.end()) {
4899            semaphoreNode->second.in_use.fetch_sub(1);
4900        }
4901    }
4902    for (auto event : pCB->events) {
4903        auto eventNode = my_data->eventMap.find(event);
4904        if (eventNode != my_data->eventMap.end()) {
4905            eventNode->second.in_use.fetch_sub(1);
4906        }
4907    }
4908    for (auto queryStatePair : pCB->queryToStateMap) {
4909        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4910    }
4911    for (auto eventStagePair : pCB->eventToStageMap) {
4912        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4913    }
4914}
4915
4916static void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4917    for (uint32_t i = 0; i < fenceCount; ++i) {
4918        auto fence_data = my_data->fenceMap.find(pFences[i]);
4919        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4920            return;
4921        fence_data->second.needsSignaled = false;
4922        fence_data->second.in_use.fetch_sub(1);
4923        decrementResources(my_data, static_cast<uint32_t>(fence_data->second.priorFences.size()),
4924                           fence_data->second.priorFences.data());
4925        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
4926            decrementResources(my_data, cmdBuffer);
4927        }
4928    }
4929}
4930
4931static void decrementResources(layer_data *my_data, VkQueue queue) {
4932    auto queue_data = my_data->queueMap.find(queue);
4933    if (queue_data != my_data->queueMap.end()) {
4934        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
4935            decrementResources(my_data, cmdBuffer);
4936        }
4937        queue_data->second.untrackedCmdBuffers.clear();
4938        decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4939                           queue_data->second.lastFences.data());
4940    }
4941}
4942
4943static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
4944    if (queue == other_queue) {
4945        return;
4946    }
4947    auto queue_data = dev_data->queueMap.find(queue);
4948    auto other_queue_data = dev_data->queueMap.find(other_queue);
4949    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
4950        return;
4951    }
4952    for (auto fenceInner : other_queue_data->second.lastFences) {
4953        queue_data->second.lastFences.push_back(fenceInner);
4954    }
4955    if (fence != VK_NULL_HANDLE) {
4956        auto fence_data = dev_data->fenceMap.find(fence);
4957        if (fence_data == dev_data->fenceMap.end()) {
4958            return;
4959        }
4960        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
4961            fence_data->second.cmdBuffers.push_back(cmdbuffer);
4962        }
4963        other_queue_data->second.untrackedCmdBuffers.clear();
4964    } else {
4965        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
4966            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
4967        }
4968        other_queue_data->second.untrackedCmdBuffers.clear();
4969    }
4970    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
4971        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
4972    }
4973}
4974
4975static void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4976                                VkFence fence) {
4977    auto queue_data = my_data->queueMap.find(queue);
4978    if (fence != VK_NULL_HANDLE) {
4979        vector<VkFence> prior_fences;
4980        auto fence_data = my_data->fenceMap.find(fence);
4981        if (fence_data == my_data->fenceMap.end()) {
4982            return;
4983        }
4984        if (queue_data != my_data->queueMap.end()) {
4985            prior_fences = queue_data->second.lastFences;
4986            queue_data->second.lastFences.clear();
4987            queue_data->second.lastFences.push_back(fence);
4988            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
4989                fence_data->second.cmdBuffers.push_back(cmdbuffer);
4990            }
4991            queue_data->second.untrackedCmdBuffers.clear();
4992        }
4993        fence_data->second.cmdBuffers.clear();
4994        fence_data->second.priorFences = prior_fences;
4995        fence_data->second.needsSignaled = true;
4996        fence_data->second.queue = queue;
4997        fence_data->second.in_use.fetch_add(1);
4998        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4999            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5000            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5001                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5002                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
5003                }
5004                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
5005            }
5006        }
5007    } else {
5008        if (queue_data != my_data->queueMap.end()) {
5009            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5010                const VkSubmitInfo *submit = &pSubmits[submit_idx];
5011                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5012                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5013                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
5014                    }
5015                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
5016                }
5017            }
5018        }
5019    }
5020    if (queue_data != my_data->queueMap.end()) {
5021        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5022            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5023            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5024                // Add cmdBuffers to both the global set and queue set
5025                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5026                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
5027                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
5028                }
5029                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5030                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5031            }
5032        }
5033    }
5034}
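
// Bookkeeping summary: when a fence is provided, this submit's primary and
// secondary CBs (plus any CBs previously left untracked on this queue) are tied
// to that fence and retired when it signals; with no fence they accumulate in
// untrackedCmdBuffers until a later fenced submit or queue wait adopts them.
// Either way the CBs are also marked in-flight in the global and per-queue sets.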
5035
5036static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5037    bool skip_call = false;
5038    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
5039        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5040        skip_call |=
5041            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5042                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
5043                    "Command Buffer %#" PRIx64 " is already in use and is not marked for simultaneous use.",
5044                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
5045    }
5046    return skip_call;
5047}
5048
5049static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5050    bool skipCall = false;
5051    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
5052    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5053        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5054                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5055                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
5056                            "set, but has been submitted %#" PRIxLEAST64 " times.",
5057                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5058    }
5059    // Validate that cmd buffers have been updated
5060    if (CB_RECORDED != pCB->state) {
5061        if (CB_INVALID == pCB->state) {
5062            // Inform app of reason CB invalid
5063            bool causeReported = false;
5064            if (!pCB->destroyedSets.empty()) {
5065                std::stringstream set_string;
5066                for (auto set : pCB->destroyedSets)
5067                    set_string << " " << set;
5068
5069                skipCall |=
5070                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5071                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5072                            "You are submitting command buffer %#" PRIxLEAST64
5073                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
5074                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5075                causeReported = true;
5076            }
5077            if (!pCB->updatedSets.empty()) {
5078                std::stringstream set_string;
5079                for (auto set : pCB->updatedSets)
5080                    set_string << " " << set;
5081
5082                skipCall |=
5083                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5084                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5085                            "You are submitting command buffer %#" PRIxLEAST64
5086                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
5087                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5088                causeReported = true;
5089            }
5090            if (!pCB->destroyedFramebuffers.empty()) {
5091                std::stringstream fb_string;
5092                for (auto fb : pCB->destroyedFramebuffers)
5093                    fb_string << " " << fb;
5094
5095                skipCall |=
5096                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5097                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5098                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
5099                            "referenced framebuffers destroyed: %s",
5100                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
5101                causeReported = true;
5102            }
5103            // TODO : This is defensive programming to make sure an error is
5104            //  flagged if we hit this INVALID cmd buffer case and none of the
5105            //  above cases are hit. As the number of INVALID cases grows, this
5106            //  code should be updated to seamlessly handle all the cases.
5107            if (!causeReported) {
5108                skipCall |= log_msg(
5109                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5110                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5111                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
5112                    "should "
5113                    "be improved to report the exact cause.",
5114                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
5115            }
5116        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
5117            skipCall |=
5118                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5119                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
5120                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
5121                        (uint64_t)(pCB->commandBuffer));
5122        }
5123    }
5124    return skipCall;
5125}
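
// Sketch of the one-time-submit violation flagged above (counts assumed):
//     beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
//     vkBeginCommandBuffer(cb, &beginInfo);
//     /* record work */ vkEndCommandBuffer(cb);
//     vkQueueSubmit(queue, 1, &submit, fence1); // OK, submitCount == 1
//     vkQueueSubmit(queue, 1, &submit, fence2); // flagged, submitCount == 2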
5126
5127static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5128    // Track in-use for resources off of primary and any secondary CBs
5129    bool skipCall = validateAndIncrementResources(dev_data, pCB);
5130    if (!pCB->secondaryCommandBuffers.empty()) {
5131        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5132            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5133            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5134            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
5135                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5136                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5137                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5138                        " but that buffer has subsequently been bound to "
5139                        "primary cmd buffer %#" PRIxLEAST64 ".",
5140                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5141                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5142            }
5143        }
5144    }
5145    skipCall |= validateCommandBufferState(dev_data, pCB);
5146    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5147    // on device
5148    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5149    return skipCall;
5150}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skipCall = false;
    GLOBAL_CB_NODE *pCBNode = NULL;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
    // First verify that fence is not in use
    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
    }
    uint64_t fenceId = 0;
    skipCall |= add_fence_info(dev_data, fence, queue, &fenceId);
    // TODO : Review these old print functions and clean up as appropriate
    print_mem_list(dev_data);
    printCBList(dev_data);
    // Now verify each individual submit
    std::unordered_set<VkQueue> processed_other_queues;
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<VkSemaphore> semaphoreList;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
            semaphoreList.push_back(semaphore);
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
                const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
                    processed_other_queues.insert(other_queue);
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                semaphoreList.push_back(semaphore);
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
                                " that has already been signaled but not waited on by queue %#" PRIx64 ".",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
                } else {
                    dev_data->semaphoreMap[semaphore].signaled = true;
                    dev_data->semaphoreMap[semaphore].queue = queue;
                }
            }
        }
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (pCBNode) {
                pCBNode->semaphores = semaphoreList;
                pCBNode->submitCount++; // increment submit count
                pCBNode->fenceId = fenceId;
                pCBNode->lastSubmittedFence = fence;
                pCBNode->lastSubmittedQueue = queue;
                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
                // Call submit-time functions to validate/update state
                for (auto &function : pCBNode->validate_functions) {
                    skipCall |= function();
                }
                for (auto &function : pCBNode->eventUpdates) {
                    skipCall |= function(queue);
                }
            }
        }
    }
    // Update cmdBuffer-related data structs and mark fence in-use
    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}
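
// Illustrative sketch (not part of the layer; the helper and its parameter names are
// hypothetical): the wait/signal pairing that the per-semaphore bookkeeping in
// vkQueueSubmit() above models. Waiting on `acquire` flips its tracked `signaled`
// flag back to false; signaling `render` flips it to true, or reports a double-signal.
static VkResult exampleSubmitWithSemaphores(VkQueue queue, VkCommandBuffer cb, VkSemaphore acquire, VkSemaphore render,
                                            VkFence fence) {
    VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkSubmitInfo si = {};
    si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    si.waitSemaphoreCount = 1;          // consumes one prior signal of `acquire`
    si.pWaitSemaphores = &acquire;
    si.pWaitDstStageMask = &waitStage;
    si.commandBufferCount = 1;
    si.pCommandBuffers = &cb;
    si.signalSemaphoreCount = 1;        // produces one signal of `render`
    si.pSignalSemaphores = &render;
    return vkQueueSubmit(queue, 1, &si, fence);
}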

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // TODO : Track allocations and overall size here
    loader_platform_thread_lock_mutex(&globalLock);
    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
    print_mem_list(my_data);
    loader_platform_thread_unlock_mutex(&globalLock);
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
    // Before freeing a memory object, an application must ensure the memory object is no longer
    // in use by the device—for example by command buffers queued for execution. The memory need
    // not yet be unbound from all images and buffers, but any further use of those images or
    // buffers (on host or device) for anything other than destroying those objects will result in
    // undefined behavior.
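    //
    // Illustrative ordering an application might use (sketch only; the fence name
    // is hypothetical):
    //     vkWaitForFences(device, 1, &lastSubmitFence, VK_TRUE, UINT64_MAX); // device done with mem
    //     vkFreeMemory(device, mem, pAllocator);                            // now legal to free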

    loader_platform_thread_lock_mutex(&globalLock);
    freeMemObjInfo(my_data, device, mem, false);
    print_mem_list(my_data);
    printCBList(my_data);
    loader_platform_thread_unlock_mutex(&globalLock);
    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
}

static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skipCall = false;

    if (size == 0) {
        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        // It is an application error to call VkMapMemory on an object that is already mapped
        if (mem_element->second.memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory with VK_WHOLE_SIZE from offset %" PRIu64
                                    ", which is not less than the total allocation size %" PRIu64,
                                    offset, mem_element->second.allocInfo.allocationSize);
            }
        } else {
            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total allocation size %" PRIu64,
                                    offset, size + offset, mem_element->second.allocInfo.allocationSize);
            }
        }
    }
    return skipCall;
}

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        MemRange new_range;
        new_range.offset = offset;
        new_range.size = size;
        mem_element->second.memRange = new_range;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skipCall = false;
    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        if (!mem_element->second.memRange.size) {
            // Valid Usage: memory must currently be mapped
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
        }
        mem_element->second.memRange.size = 0;
        if (mem_element->second.pData) {
            free(mem_element->second.pData);
            mem_element->second.pData = 0;
        }
    }
    return skipCall;
}

static char NoncoherentMemoryFillValue = 0xb;

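// For non-coherent memory, initializeAndTrackMemory() below does not hand the
// application the driver's mapping directly. It allocates a shadow buffer twice
// the mapped size, fills it with NoncoherentMemoryFillValue, and returns a
// pointer offset into the middle of that buffer, leaving fill-pattern margins
// on either side of the application-visible range. Writes that stray outside
// the mapped range land in those margins, where later checks (in the flush and
// unmap paths, outside this excerpt) can detect them by comparing against the
// fill pattern.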
static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
    auto mem_element = dev_data->memObjMap.find(mem);
    if (mem_element != dev_data->memObjMap.end()) {
        mem_element->second.pDriverData = *ppData;
        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_element->second.pData = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_element->second.allocInfo.allocationSize;
            }
            size_t convSize = (size_t)(size);
            mem_element->second.pData = malloc(2 * convSize);
            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
        }
    }
}
#endif
// Note: This function assumes that the global lock is held by the calling
// thread.
static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    if (pCB) {
        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
            for (auto event : queryEventsPair.second) {
                if (my_data->eventMap[event].needsSignaled) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool %" PRIu64
                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                }
            }
        }
    }
    return skip_call;
}
// Remove given cmd_buffer from the global inFlight set.
//  Also, if given queue is valid, then remove the cmd_buffer from that queue's
//  inFlightCmdBuffer set. Finally, check all other queues and if given cmd_buffer
//  is still in flight on another queue, add it back into the global set.
// Note: This function assumes that the global lock is held by the calling
// thread.
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
        for (auto q : dev_data->queues) {
            if ((q != queue) &&
                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
                break;
            }
        }
    }
}
#if MTMERGESOURCE
static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    auto pFenceInfo = my_data->fenceMap.find(fence);
    if (pFenceInfo != my_data->fenceMap.end()) {
        if (!pFenceInfo->second.firstTimeFlag) {
            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
            }
            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                    "acquire next image.",
                                    apiCall, reinterpret_cast<uint64_t &>(fence));
            }
        } else {
            pFenceInfo->second.firstTimeFlag = false;
        }
    }
    return skipCall;
}
#endif
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
#if MTMERGESOURCE
    // Verify fence status of submitted fences
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
#endif
    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        // When we know that all fences are complete we can clean/remove their CBs
        if (waitAll || fenceCount == 1) {
            for (uint32_t i = 0; i < fenceCount; ++i) {
#if MTMERGESOURCE
                update_fence_tracking(dev_data, pFences[i]);
#endif
                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
                }
            }
            decrementResources(dev_data, fenceCount, pFences);
        }
        // NOTE : Alternate case not handled here is when some fences have completed. In
        //  this case for app to guarantee which fences completed it will have to call
        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}
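
// Illustrative sketch (hypothetical app-side pattern, not called by the layer):
// a blocking wait on a single fence followed by a reset. With one fence the
// `waitAll || fenceCount == 1` path in vkWaitForFences() above retires the
// fence's command buffers before the fence is reused.
static VkResult exampleWaitThenResetFence(VkDevice device, VkFence fence) {
    VkResult res = vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
    if (res == VK_SUCCESS)
        res = vkResetFences(device, 1, &fence); // safe: fence no longer in use
    return res;
}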

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return result;
#endif
    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    loader_platform_thread_lock_mutex(&globalLock);
    if (result == VK_SUCCESS) {
#if MTMERGESOURCE
        update_fence_tracking(dev_data, fence);
#endif
        auto fence_queue = dev_data->fenceMap[fence].queue;
        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
            skipCall |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
        }
        decrementResources(dev_data, 1, &fence);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                                            VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    loader_platform_thread_lock_mutex(&globalLock);

    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
        pQNode->device = device;
#if MTMERGESOURCE
        pQNode->lastRetiredId = 0;
        pQNode->lastSubmittedId = 0;
#endif
    }

    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    decrementResources(dev_data, queue);
    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    // Iterate over a local copy of the set since removeInFlightCmdBuffer() erases set members as we go
    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
    for (auto cmdBuffer : local_cb_set) {
        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
    }
    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
#if MTMERGESOURCE
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        retire_queue_fences(dev_data, queue);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    for (auto queue : dev_data->queues) {
        decrementResources(dev_data, queue);
        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
        }
    }
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
    }
    dev_data->globalInFlightCmdBuffers.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
#if MTMERGESOURCE
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        retire_device_fences(dev_data, device);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    auto fence_pair = dev_data->fenceMap.find(fence);
    if (fence_pair != dev_data->fenceMap.end()) {
        if (fence_pair->second.in_use.load()) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
        }
        dev_data->fenceMap.erase(fence_pair);
    }
    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->semaphoreMap.find(semaphore);
    if (item != dev_data->semaphoreMap.end()) {
        if (item->second.in_use.load()) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
        }
        dev_data->semaphoreMap.erase(semaphore);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    auto event_data = dev_data->eventMap.find(event);
    if (event_data != dev_data->eventMap.end()) {
        if (event_data->second.in_use.load()) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
        }
        dev_data->eventMap.erase(event_data);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skip_call)
        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                     VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
    GLOBAL_CB_NODE *pCB = nullptr;
    loader_platform_thread_lock_mutex(&globalLock);
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        pCB = getCBNode(dev_data, cmdBuffer);
        for (auto queryStatePair : pCB->queryToStateMap) {
            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
        }
    }
    bool skip_call = false;
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        if (queryToStateElement != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
                for (auto cmdBuffer : queryElement->second) {
                    pCB = getCBNode(dev_data, cmdBuffer);
                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                             "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
                                             (uint64_t)(queryPool), firstQuery + i);
                    } else {
                        for (auto event : queryEventElement->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
            // Unavailable and in flight
            } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmdBuffer : queryElement->second) {
                    pCB = getCBNode(dev_data, cmdBuffer);
                    make_available |= pCB->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
                                         (uint64_t)(queryPool), firstQuery + i);
                }
            // Unavailable
            } else if (!queryToStateElement->second) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
        // Uninitialized: no state has ever been collected for this query index
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool %" PRIu64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
                                                                flags);
}
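
// Illustrative sketch (hypothetical helper, not called by the layer): fetching
// 64-bit results with VK_QUERY_RESULT_WAIT_BIT. Blocking until results land is
// the pattern the "unavailable and in flight" branch above accepts, since the
// query will eventually become available.
static VkResult exampleGetQueryResultsBlocking(VkDevice device, VkQueryPool pool, uint32_t firstQuery, uint32_t queryCount,
                                               uint64_t *results) {
    // One tightly packed uint64_t per query; stride equals the element size.
    return vkGetQueryPoolResults(device, pool, firstQuery, queryCount, queryCount * sizeof(uint64_t), results,
                                 sizeof(uint64_t), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
}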

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_data = my_data->bufferMap.find(buffer);
    if (buffer_data == my_data->bufferMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_data->second.in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
        }
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    bool skipCall = validateIdleBuffer(dev_data, buffer);
    if (!skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
        loader_platform_thread_lock_mutex(&globalLock);
    }
    dev_data->bufferMap.erase(buffer);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    auto item = dev_data->bufferViewMap.find(bufferView);
    if (item != dev_data->bufferViewMap.end()) {
        dev_data->bufferViewMap.erase(item);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);

    loader_platform_thread_lock_mutex(&globalLock);
    auto entry = dev_data->imageMap.find(image);
    if (entry != dev_data->imageMap.end()) {
        // Clear any memory mapping for this image
        auto mem_entry = dev_data->memObjMap.find(entry->second.mem);
        if (mem_entry != dev_data->memObjMap.end())
            mem_entry->second.image = VK_NULL_HANDLE;

        // Remove image from imageMap
        dev_data->imageMap.erase(entry);
    }
    auto subEntry = dev_data->imageSubresourceMap.find(image);
    if (subEntry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : subEntry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(subEntry);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
}
#if MTMERGESOURCE
static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
                                     VkDebugReportObjectTypeEXT object_type) {
    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
                       other_handle);
    } else {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
                       other_handle);
    }
}

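// Overlap below is tested at bufferImageGranularity resolution: masking both
// range endpoints with ~(granularity - 1) rounds them down to granularity-sized
// blocks (the limit is a power of two), so a buffer and an image that merely
// share a granularity block are flagged as aliased.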
static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
                                  VkDebugReportObjectTypeEXT object_type) {
    bool skip_call = false;

    for (auto range : ranges) {
        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
            continue;
        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
            continue;
        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
    }
    return skip_call;
}

static bool validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                           VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
                                           const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
    MEMORY_RANGE range;
    range.handle = handle;
    range.memory = mem;
    range.start = memoryOffset;
    range.end = memoryOffset + memRequirements.size - 1;
    ranges.push_back(range);
    return validate_memory_range(dev_data, other_ranges, range, object_type);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
    // Track objects tied to memory
    uint64_t buffer_handle = (uint64_t)(buffer);
    bool skipCall =
        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_node = dev_data->bufferMap.find(buffer);
    if (buffer_node != dev_data->bufferMap.end()) {
        buffer_node->second.mem = mem;
        VkMemoryRequirements memRequirements;
        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
                        "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be an integer multiple of the "
                        "VkMemoryRequirements::alignment value %#" PRIxLEAST64
                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
                        memoryOffset, memRequirements.alignment);
        }
        // Validate device limits alignments
        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].createInfo.usage;
        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
                            "device limit minTexelBufferOffsetAlignment %#" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
            }
        }
        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
                0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
            }
        }
        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
                0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
            }
        }
    }
    print_mem_list(dev_data);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall) {
        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}
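
// Illustrative sketch (hypothetical helper, not called by the layer): rounding a
// proposed bind offset up to VkMemoryRequirements::alignment so the
// vk_safe_modulo() checks above pass. Assumes alignment is a power of two, which
// the spec guarantees for VkMemoryRequirements::alignment.
static VkDeviceSize exampleAlignBindOffset(VkDeviceSize offset, VkDeviceSize alignment) {
    // Round up: add (alignment - 1), then clear the low bits.
    return (offset + alignment - 1) & ~(alignment - 1);
}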

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}
#endif
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);

    my_data->shaderModuleMap.erase(shaderModule);

    loader_platform_thread_unlock_mutex(&globalLock);

    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
        }
        // Delete CB information structure, and remove from commandBufferMap
        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
        if (cb != dev_data->commandBufferMap.end()) {
            // reset prior to delete for data clean-up
            resetCB(dev_data, (*cb).second->commandBuffer);
            delete (*cb).second;
            dev_data->commandBufferMap.erase(cb);
        }

        // Remove commandBuffer reference from commandPoolMap
        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
    }
#if MTMERGESOURCE
    printCBList(dev_data);
#endif
    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skip_call)
        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
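
// Illustrative sketch (hypothetical helper, not called by the layer): creating a
// pool whose flags permit per-command-buffer resets. The createFlags recorded in
// vkCreateCommandPool() above give later reset validation something to consult.
static VkResult exampleCreateResettablePool(VkDevice device, uint32_t queueFamilyIndex, VkCommandPool *pPool) {
    VkCommandPoolCreateInfo info = {};
    info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; // allow vkResetCommandBuffer()
    info.queueFamilyIndex = queueFamilyIndex;
    return vkCreateCommandPool(device, &info, NULL, pPool);
}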

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

static bool validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool, const char *action) {
    bool skipCall = false;
    auto pool_data = dev_data->commandPoolMap.find(commandPool);
    if (pool_data != dev_data->commandPoolMap.end()) {
        for (auto cmdBuffer : pool_data->second.commandBuffers) {
            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                            "Cannot %s command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.", action,
                            reinterpret_cast<const uint64_t &>(commandPool), reinterpret_cast<const uint64_t &>(cmdBuffer));
            }
        }
    }
    return skipCall;
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    // Verify that command buffers in pool are complete (not in-flight)
    bool skipCall = validateCommandBuffersNotInUse(dev_data, commandPool, "destroy");
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
            clear_cmd_buf_and_mem_references(dev_data, *poolCb);
            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
            delete (*del_cb).second;                  // delete CB info structure
            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
                poolCb); // Remove CB reference from commandPoolMap's list
        }
    }
    dev_data->commandPoolMap.erase(commandPool);

    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    if (validateCommandBuffersNotInUse(dev_data, commandPool, "reset"))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
            resetCB(dev_data, (*it));
            ++it;
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
6102
6103VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6104    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6105    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6106    bool skipCall = false;
6107    loader_platform_thread_lock_mutex(&globalLock);
6108    for (uint32_t i = 0; i < fenceCount; ++i) {
6109#if MTMERGESOURCE
6110        // Reset fence state in fenceCreateInfo structure
6111        // MTMTODO : Merge with code below
6112        auto fence_item = dev_data->fenceMap.find(pFences[i]);
6113        if (fence_item != dev_data->fenceMap.end()) {
6114            // Validate fences in SIGNALED state
6115            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
6116                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
6117                skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6118                                   (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
6119                                   "Fence %#" PRIxLEAST64 " submitted to VkResetFences in UNSIGNALED STATE", (uint64_t)pFences[i]);
6120            } else {
6121                fence_item->second.createInfo.flags =
6122                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
6123            }
6124        }
6125#endif
6126        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
6127            skipCall |=
6128                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6129                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6130                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
6131        }
6132    }
6133    loader_platform_thread_unlock_mutex(&globalLock);
6134    if (!skipCall)
6135        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6136    return result;
6137}
6138
6139VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6140vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6141    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6142    loader_platform_thread_lock_mutex(&globalLock);
6143    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
6144    if (fbNode != dev_data->frameBufferMap.end()) {
6145        for (auto cb : fbNode->second.referencingCmdBuffers) {
6146            auto cbNode = dev_data->commandBufferMap.find(cb);
6147            if (cbNode != dev_data->commandBufferMap.end()) {
6148                // Set CB as invalid and record destroyed framebuffer
6149                cbNode->second->state = CB_INVALID;
6150                cbNode->second->destroyedFramebuffers.insert(framebuffer);
6151            }
6152        }
6153        delete [] fbNode->second.createInfo.pAttachments;
6154        dev_data->frameBufferMap.erase(fbNode);
6155    }
6156    loader_platform_thread_unlock_mutex(&globalLock);
6157    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6158}
6159
6160VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6161vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6162    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6163    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6164    loader_platform_thread_lock_mutex(&globalLock);
6165    dev_data->renderPassMap.erase(renderPass);
6166    loader_platform_thread_unlock_mutex(&globalLock);
6167}
6168
6169VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6170                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6171    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6172
6173    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6174
6175    if (VK_SUCCESS == result) {
6176        loader_platform_thread_lock_mutex(&globalLock);
6177        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6178        dev_data->bufferMap[*pBuffer].createInfo = *pCreateInfo;
6179        dev_data->bufferMap[*pBuffer].in_use.store(0);
6180        loader_platform_thread_unlock_mutex(&globalLock);
6181    }
6182    return result;
6183}
6184
6185VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6186                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6187    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6188    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6189    if (VK_SUCCESS == result) {
6190        loader_platform_thread_lock_mutex(&globalLock);
6191        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
6192#if MTMERGESOURCE
6193        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6194        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6195        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
6196                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
6197                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6198#endif
6199        loader_platform_thread_unlock_mutex(&globalLock);
6200    }
6201    return result;
6202}
6203
6204VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6205                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6206    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6207
6208    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6209
6210    if (VK_SUCCESS == result) {
6211        loader_platform_thread_lock_mutex(&globalLock);
6212        IMAGE_LAYOUT_NODE image_node;
6213        image_node.layout = pCreateInfo->initialLayout;
6214        image_node.format = pCreateInfo->format;
6215        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
6216        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6217        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6218        dev_data->imageLayoutMap[subpair] = image_node;
6219        loader_platform_thread_unlock_mutex(&globalLock);
6220    }
6221    return result;
6222}
6223
6224static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6225    /* expects globalLock to be held by caller */
6226
6227    auto image_node_it = dev_data->imageMap.find(image);
6228    if (image_node_it != dev_data->imageMap.end()) {
6229        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6230         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6231         * the actual values.
6232         */
6233        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6234            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
6235        }
6236
6237        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6238            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
6239        }
6240    }
6241}
6242
6243// Return the correct layer/level counts if the caller used the special
6244// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6245static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6246                                         VkImage image) {
6247    /* expects globalLock to be held by caller */
6248
6249    *levels = range.levelCount;
6250    *layers = range.layerCount;
6251    auto image_node_it = dev_data->imageMap.find(image);
6252    if (image_node_it != dev_data->imageMap.end()) {
6253        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6254            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
6255        }
6256        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6257            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
6258        }
6259    }
6260}
6261
6262VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6263                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6264    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6265    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6266    if (VK_SUCCESS == result) {
6267        loader_platform_thread_lock_mutex(&globalLock);
6268        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
6269        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
6270        dev_data->imageViewMap[*pView] = localCI;
6271#if MTMERGESOURCE
        // Validate that the image was created with appropriate usage flags set
        validate_image_usage_flags(dev_data, pCreateInfo->image,
                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                   false, "vkCreateImageView()",
                                   "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
6277#endif
6278        loader_platform_thread_unlock_mutex(&globalLock);
6279    }
6280    return result;
6281}
6282
6283VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6284vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6285    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6286    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6287    if (VK_SUCCESS == result) {
6288        loader_platform_thread_lock_mutex(&globalLock);
6289        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
6290#if MTMERGESOURCE
6291        memset(pFN, 0, sizeof(MT_FENCE_INFO));
6292        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
6293        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
6294            pFN->firstTimeFlag = true;
6295        }
6296#endif
6297        pFN->in_use.store(0);
6298        loader_platform_thread_unlock_mutex(&globalLock);
6299    }
6300    return result;
6301}
6302
6303// TODO handle pipeline caches
6304VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6305                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6306    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6307    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6308    return result;
6309}
6310
6311VKAPI_ATTR void VKAPI_CALL
6312vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6313    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6314    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6315}
6316
6317VKAPI_ATTR VkResult VKAPI_CALL
6318vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6319    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6320    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6321    return result;
6322}
6323
6324VKAPI_ATTR VkResult VKAPI_CALL
6325vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6326    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6327    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6328    return result;
6329}
6330
6331// utility function to set collective state for pipeline
6332void set_pipeline_state(PIPELINE_NODE *pPipe) {
6333    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6334    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6335        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6336            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6337                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6338                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6339                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6340                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6341                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6342                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6343                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6344                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6345                    pPipe->blendConstantsEnabled = true;
6346                }
6347            }
6348        }
6349    }
6350}
6351
6352VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6353vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6354                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6355                          VkPipeline *pPipelines) {
6356    VkResult result = VK_SUCCESS;
6357    // TODO What to do with pipelineCache?
6358    // The order of operations here is a little convoluted but gets the job done
6359    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6360    //  2. Create state is then validated (which uses flags setup during shadowing)
6361    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6362    bool skipCall = false;
6363    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6364    vector<PIPELINE_NODE *> pPipeNode(count);
6365    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6366
6367    uint32_t i = 0;
6368    loader_platform_thread_lock_mutex(&globalLock);
6369
6370    for (i = 0; i < count; i++) {
6371        pPipeNode[i] = new PIPELINE_NODE;
6372        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
6373        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6374    }
6375
6376    if (!skipCall) {
6377        loader_platform_thread_unlock_mutex(&globalLock);
6378        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6379                                                                          pPipelines);
6380        loader_platform_thread_lock_mutex(&globalLock);
6381        for (i = 0; i < count; i++) {
6382            pPipeNode[i]->pipeline = pPipelines[i];
6383            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6384        }
6385        loader_platform_thread_unlock_mutex(&globalLock);
6386    } else {
6387        for (i = 0; i < count; i++) {
6388            delete pPipeNode[i];
6389        }
6390        loader_platform_thread_unlock_mutex(&globalLock);
6391        return VK_ERROR_VALIDATION_FAILED_EXT;
6392    }
6393    return result;
6394}
6395
6396VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6397vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6398                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6399                         VkPipeline *pPipelines) {
6400    VkResult result = VK_SUCCESS;
6401    bool skipCall = false;
6402
6403    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6404    vector<PIPELINE_NODE *> pPipeNode(count);
6405    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6406
6407    uint32_t i = 0;
6408    loader_platform_thread_lock_mutex(&globalLock);
6409    for (i = 0; i < count; i++) {
6410        // TODO: Verify compute stage bits
6411
6412        // Create and initialize internal tracking data structure
6413        pPipeNode[i] = new PIPELINE_NODE;
6414        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
6415        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6416
6417        // TODO: Add Compute Pipeline Verification
6418        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6419    }
6420
6421    if (!skipCall) {
6422        loader_platform_thread_unlock_mutex(&globalLock);
6423        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6424                                                                         pPipelines);
6425        loader_platform_thread_lock_mutex(&globalLock);
6426        for (i = 0; i < count; i++) {
6427            pPipeNode[i]->pipeline = pPipelines[i];
6428            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6429        }
6430        loader_platform_thread_unlock_mutex(&globalLock);
6431    } else {
6432        for (i = 0; i < count; i++) {
6433            // Clean up any locally allocated data structures
6434            delete pPipeNode[i];
6435        }
6436        loader_platform_thread_unlock_mutex(&globalLock);
6437        return VK_ERROR_VALIDATION_FAILED_EXT;
6438    }
6439    return result;
6440}
6441
6442VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6443                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6444    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6445    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6446    if (VK_SUCCESS == result) {
6447        loader_platform_thread_lock_mutex(&globalLock);
6448        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6449        loader_platform_thread_unlock_mutex(&globalLock);
6450    }
6451    return result;
6452}
6453
6454VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6455vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6456                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6457    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6458    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6459    if (VK_SUCCESS == result) {
6460        // TODOSC : Capture layout bindings set
6461        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
6462        if (NULL == pNewNode) {
6463            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
6464                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6465                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
6466                return VK_ERROR_VALIDATION_FAILED_EXT;
6467        }
6468        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
6469        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
6470        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
6471               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
6472        // g++ does not like reserve with size 0
6473        if (pCreateInfo->bindingCount)
6474            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
6475        uint32_t totalCount = 0;
        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
            // emplace() records the binding->index mapping and reports whether the binding number was already present
            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
                            DRAWSTATE_INVALID_LAYOUT, "DS",
                            "duplicated binding number in VkDescriptorSetLayoutBinding"))
                    return VK_ERROR_VALIDATION_FAILED_EXT;
            }
6486            totalCount += pCreateInfo->pBindings[i].descriptorCount;
6487            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
6488                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
6489                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
6490                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
6491                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
6492            }
6493        }
6494        pNewNode->layout = *pSetLayout;
6495        pNewNode->startIndex = 0;
6496        if (totalCount > 0) {
6497            pNewNode->descriptorTypes.resize(totalCount);
6498            pNewNode->stageFlags.resize(totalCount);
6499            uint32_t offset = 0;
6500            uint32_t j = 0;
6501            VkDescriptorType dType;
6502            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6503                dType = pCreateInfo->pBindings[i].descriptorType;
6504                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
6505                    pNewNode->descriptorTypes[offset + j] = dType;
6506                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
6507                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
6508                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
6509                        pNewNode->dynamicDescriptorCount++;
6510                    }
6511                }
6512                offset += j;
6513            }
6514            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
6515        } else { // no descriptors
6516            pNewNode->endIndex = 0;
6517        }
        // Store the new layout node in the global descriptor set layout map
6519        loader_platform_thread_lock_mutex(&globalLock);
6520        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
6521        loader_platform_thread_unlock_mutex(&globalLock);
6522    }
6523    return result;
6524}
6525
6526static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6527                                     const char *caller_name) {
6528    bool skipCall = false;
6529    if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                 "exceeds this device's maxPushConstantsSize of %u.",
                           caller_name, offset, size, dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize);
6534    }
6535    return skipCall;
6536}
6537
6538VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6539                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6540    bool skipCall = false;
6541    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6542    uint32_t i = 0;
6543    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6544        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6545                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
6546        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
6547            skipCall |=
6548                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6549                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
6550                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6551                        i, pCreateInfo->pPushConstantRanges[i].size);
6552        }
6553        // TODO : Add warning if ranges overlap
6554    }
6555    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6556    if (VK_SUCCESS == result) {
6557        loader_platform_thread_lock_mutex(&globalLock);
6558        // TODOSC : Merge capture of the setLayouts per pipeline
6559        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6560        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
6561        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6562            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
6563        }
6564        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
6565        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6566            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
6567        }
6568        loader_platform_thread_unlock_mutex(&globalLock);
6569    }
6570    return result;
6571}
6572
6573VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6574vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6575                       VkDescriptorPool *pDescriptorPool) {
6576    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6577    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6578    if (VK_SUCCESS == result) {
        // Track this pool in the global descriptor pool map
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
6583            return VK_ERROR_VALIDATION_FAILED_EXT;
6584        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6585        if (NULL == pNewNode) {
6586            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6587                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6588                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6589                return VK_ERROR_VALIDATION_FAILED_EXT;
6590        } else {
6591            loader_platform_thread_lock_mutex(&globalLock);
6592            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6593            loader_platform_thread_unlock_mutex(&globalLock);
6594        }
    } else {
        // TODO : Is any clean-up needed here if pool creation fails?
    }
6598    return result;
6599}
6600
6601VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6602vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6603    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6604    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6605    if (VK_SUCCESS == result) {
6606        loader_platform_thread_lock_mutex(&globalLock);
6607        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6608        loader_platform_thread_unlock_mutex(&globalLock);
6609    }
6610    return result;
6611}
6612
6613VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6614vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6615    bool skipCall = false;
6616    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6617
6618    loader_platform_thread_lock_mutex(&globalLock);
6619    // Verify that requested descriptorSets are available in pool
6620    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6621    if (!pPoolNode) {
6622        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6623                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
6624                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
6625                            (uint64_t)pAllocateInfo->descriptorPool);
6626    } else { // Make sure pool has all the available descriptors before calling down chain
6627        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
6628                                                             pAllocateInfo->pSetLayouts);
6629    }
6630    loader_platform_thread_unlock_mutex(&globalLock);
6631    if (skipCall)
6632        return VK_ERROR_VALIDATION_FAILED_EXT;
6633    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6634    if (VK_SUCCESS == result) {
6635        loader_platform_thread_lock_mutex(&globalLock);
6636        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6637        if (pPoolNode) {
6638            if (pAllocateInfo->descriptorSetCount == 0) {
6639                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6640                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
6641                        "AllocateDescriptorSets called with 0 count");
6642            }
6643            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
6644                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6645                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
6646                        (uint64_t)pDescriptorSets[i]);
6647                // Create new set node and add to head of pool nodes
6648                SET_NODE *pNewNode = new SET_NODE;
6649                if (NULL == pNewNode) {
6650                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6651                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6652                                DRAWSTATE_OUT_OF_MEMORY, "DS",
6653                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
6654                        loader_platform_thread_unlock_mutex(&globalLock);
6655                        return VK_ERROR_VALIDATION_FAILED_EXT;
6656                    }
                } else {
                    // TODO : Pool should store a total count of each type of Descriptor available
                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. On reset/free, bump the count back up.
6661                    // Insert set at head of Set LL for this pool
6662                    pNewNode->pNext = pPoolNode->pSets;
6663                    pNewNode->in_use.store(0);
6664                    pPoolNode->pSets = pNewNode;
6665                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
6666                    if (NULL == pLayout) {
6667                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6668                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
6669                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
6670                                    "Unable to find set layout node for layout %#" PRIxLEAST64
6671                                    " specified in vkAllocateDescriptorSets() call",
6672                                    (uint64_t)pAllocateInfo->pSetLayouts[i])) {
6673                            loader_platform_thread_unlock_mutex(&globalLock);
6674                            return VK_ERROR_VALIDATION_FAILED_EXT;
6675                        }
6676                    }
6677                    pNewNode->pLayout = pLayout;
6678                    pNewNode->pool = pAllocateInfo->descriptorPool;
6679                    pNewNode->set = pDescriptorSets[i];
6680                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
6681                    if (pNewNode->descriptorCount) {
6682                        pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
6683                    }
6684                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
6685                }
6686            }
6687        }
6688        loader_platform_thread_unlock_mutex(&globalLock);
6689    }
6690    return result;
6691}
6692
6693VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6694vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6695    bool skipCall = false;
6696    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6697    // Make sure that no sets being destroyed are in-flight
6698    loader_platform_thread_lock_mutex(&globalLock);
6699    for (uint32_t i = 0; i < count; ++i)
6700        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
6701    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6702    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6703        // Can't Free from a NON_FREE pool
6704        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6705                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6706                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6707                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6708    }
6709    loader_platform_thread_unlock_mutex(&globalLock);
6710    if (skipCall)
6711        return VK_ERROR_VALIDATION_FAILED_EXT;
6712    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6713    if (VK_SUCCESS == result) {
6714        loader_platform_thread_lock_mutex(&globalLock);
6715
        // pPoolNode can be NULL if the pool lookup above failed; guard before updating its bookkeeping
        if (pPoolNode) {
            // Update available descriptor sets in pool
            pPoolNode->availableSets += count;

            // For each freed descriptor add it back into the pool as available
            for (uint32_t i = 0; i < count; ++i) {
                SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
                invalidateBoundCmdBuffers(dev_data, pSet);
                LAYOUT_NODE *pLayout = pSet->pLayout;
                uint32_t typeIndex = 0, poolSizeCount = 0;
                for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
                    typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
                    poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
                    pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
                }
            }
        }
6731        loader_platform_thread_unlock_mutex(&globalLock);
6732    }
6733    // TODO : Any other clean-up or book-keeping to do here?
6734    return result;
6735}
6736
6737VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6738vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6739                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // dsUpdate will return true only if a bailout error occurs, so we only call down the chain when the update returns false
6741    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6742    loader_platform_thread_lock_mutex(&globalLock);
6743    bool rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6744    loader_platform_thread_unlock_mutex(&globalLock);
6745    if (!rtn) {
6746        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6747                                                              pDescriptorCopies);
6748    }
6749}
6750
6751VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6752vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6753    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6754    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6755    if (VK_SUCCESS == result) {
6756        loader_platform_thread_lock_mutex(&globalLock);
6757        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6758        if (cp_it != dev_data->commandPoolMap.end()) {
6759            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6760                // Add command buffer to its commandPool map
6761                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6762                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6763                // Add command buffer to map
6764                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6765                resetCB(dev_data, pCommandBuffer[i]);
6766                pCB->createInfo = *pCreateInfo;
6767                pCB->device = device;
6768            }
6769        }
6770#if MTMERGESOURCE
6771        printCBList(dev_data);
6772#endif
6773        loader_platform_thread_unlock_mutex(&globalLock);
6774    }
6775    return result;
6776}
6777
6778VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6779vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6780    bool skipCall = false;
6781    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6782    loader_platform_thread_lock_mutex(&globalLock);
6783    // Validate command buffer level
6784    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6785    if (pCB) {
6786        bool commandBufferComplete = false;
6787        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6788        skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
6789        clear_cmd_buf_and_mem_references(dev_data, pCB);
6790
6791        if (!commandBufferComplete) {
6792            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6793                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6794                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
6795                                "You must check CB flag before this call.",
6796                                commandBuffer);
6797        }
6798        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6799            // Secondary Command Buffer
6800            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6801            if (!pInfo) {
6802                skipCall |=
6803                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6804                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6805                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
6806                            reinterpret_cast<void *>(commandBuffer));
6807            } else {
6808                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6809                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6810                        skipCall |= log_msg(
6811                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6812                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6813                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
6814                            reinterpret_cast<void *>(commandBuffer));
6815                    }
6816                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6817                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6818                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6819                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6820                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
6821                                                  "valid framebuffer parameter is specified.",
6822                                            reinterpret_cast<void *>(commandBuffer));
6823                    } else {
6824                        string errorString = "";
6825                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
6826                        if (fbNode != dev_data->frameBufferMap.end()) {
6827                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
6828                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6829                                // renderPass that framebuffer was created with must be compatible with local renderPass
6830                                skipCall |=
6831                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6832                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6833                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6834                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6835                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
6836                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
6837                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6838                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6839                            }
6840                            // Connect this framebuffer to this cmdBuffer
6841                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
6842                        }
6843                    }
6844                }
6845                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6846                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6847                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6848                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6849                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6850                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6851                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
6852                                        "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not "
6853                                        "support precise occlusion queries.",
6854                                        reinterpret_cast<void *>(commandBuffer));
6855                }
6856            }
6857            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6858                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
6859                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
6860                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
6861                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6862                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6863                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6864                                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must has a subpass index (%d) "
6865                                            "that is less than the number of subpasses (%d).",
6866                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
6867                    }
6868                }
6869            }
6870        }
6871        if (CB_RECORDING == pCB->state) {
6872            skipCall |=
6873                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6874                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6875                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
6876                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6877                        (uint64_t)commandBuffer);
6878        } else if (CB_RECORDED == pCB->state) {
6879            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6880            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6881                skipCall |=
6882                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6883                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6884                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
6885                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
6886                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6887                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6888            }
6889            resetCB(dev_data, commandBuffer);
6890        }
6891        // Set updated state here in case implicit reset occurs above
6892        pCB->state = CB_RECORDING;
6893        pCB->beginInfo = *pBeginInfo;
6894        if (pCB->beginInfo.pInheritanceInfo) {
6895            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6896            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6897        }
6898    } else {
6899        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6900                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6901                            "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
6902    }
6903    loader_platform_thread_unlock_mutex(&globalLock);
6904    if (skipCall) {
6905        return VK_ERROR_VALIDATION_FAILED_EXT;
6906    }
6907    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6908
6909    return result;
6910}
6911
6912VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
6913    bool skipCall = false;
6914    VkResult result = VK_SUCCESS;
6915    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6916    loader_platform_thread_lock_mutex(&globalLock);
6917    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6918    if (pCB) {
6919        if (pCB->state != CB_RECORDING) {
6920            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
6921        }
6922        for (auto query : pCB->activeQueries) {
6923            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6924                                DRAWSTATE_INVALID_QUERY, "DS",
6925                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
6926                                (uint64_t)(query.pool), query.index);
6927        }
6928    }
6929    if (!skipCall) {
6930        loader_platform_thread_unlock_mutex(&globalLock);
6931        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6932        loader_platform_thread_lock_mutex(&globalLock);
6933        if (VK_SUCCESS == result) {
6934            pCB->state = CB_RECORDED;
6935            // Reset CB status flags
6936            pCB->status = 0;
6937            printCB(dev_data, commandBuffer);
6938        }
6939    } else {
6940        result = VK_ERROR_VALIDATION_FAILED_EXT;
6941    }
6942    loader_platform_thread_unlock_mutex(&globalLock);
6943    return result;
6944}
6945
6946VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6947vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6948    bool skipCall = false;
6949    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6950    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) { // Guard against unknown command buffers before dereferencing the node
        VkCommandPool cmdPool = pCB->createInfo.commandPool;
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                                "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
                                ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                                (uint64_t)commandBuffer, (uint64_t)cmdPool);
        }
    }
6960    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6961        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6962                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6963                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
6964                            reinterpret_cast<uint64_t>(commandBuffer));
6965    }
6966    loader_platform_thread_unlock_mutex(&globalLock);
6967    if (skipCall)
6968        return VK_ERROR_VALIDATION_FAILED_EXT;
6969    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6970    if (VK_SUCCESS == result) {
6971        loader_platform_thread_lock_mutex(&globalLock);
6972        resetCB(dev_data, commandBuffer);
6973        loader_platform_thread_unlock_mutex(&globalLock);
6974    }
6975    return result;
6976}
6977#if MTMERGESOURCE
6978// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
6979//    need to account for that mem now having binding to given commandBuffer
6980#endif
6981VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6982vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6983    bool skipCall = false;
6984    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6985    loader_platform_thread_lock_mutex(&globalLock);
6986    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6987    if (pCB) {
6988        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6989        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6990            skipCall |=
6991                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6992                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6993                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
6994                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
6995        }
6996
6997        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6998        if (pPN) {
6999            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
7000            set_cb_pso_status(pCB, pPN);
7001            set_pipeline_state(pPN);
7002            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7003        } else {
7004            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7005                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7006                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7007        }
7008    }
7009    loader_platform_thread_unlock_mutex(&globalLock);
7010    if (!skipCall)
7011        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7012}
7013
7014VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7015vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7016    bool skipCall = false;
7017    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7018    loader_platform_thread_lock_mutex(&globalLock);
7019    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7020    if (pCB) {
7021        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7022        pCB->status |= CBSTATUS_VIEWPORT_SET;
7023        pCB->viewports.resize(viewportCount);
7024        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
7025    }
7026    loader_platform_thread_unlock_mutex(&globalLock);
7027    if (!skipCall)
7028        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7029}
7030
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        pCB->status |= CBSTATUS_SCISSOR_SET;
        // As with viewports, honor firstScissor when storing the updated rects
        if (pCB->scissors.size() < firstScissor + scissorCount) {
            pCB->scissors.resize(firstScissor + scissorCount);
        }
        memcpy(pCB->scissors.data() + firstScissor, pScissors, scissorCount * sizeof(VkRect2D));
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
                                                         depthBiasSlopeFactor);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
}

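// Validate and track vkCmdBindDescriptorSets(). Each incoming set must exist,
// should have been updated, and must be layout-compatible with 'layout'; the
// total number of dynamic descriptors bound must equal dynamicOffsetCount, and
// every dynamic offset must honor the device's min*BufferOffsetAlignment limits.
// Illustrative app-side call (hypothetical handles; one dynamic uniform buffer;
// 256 satisfies any device since minUniformBufferOffsetAlignment is at most 256):
//
//     uint32_t dynamicOffset = 256;
//     vkCmdBindDescriptorSets(cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
//                             0 /*firstSet*/, 1, &descriptorSet, 1, &dynamicOffset);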
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                        const uint32_t *pDynamicOffsets) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString;
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            for (uint32_t i = 0; i < setCount; i++) {
                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
                if (pSet) {
                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
                    pSet->boundCmdBuffers.insert(commandBuffer);
                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                            "DS %#" PRIxLEAST64
                                            " bound but it was never updated. You may want to either update it or not bind it.",
                                            (uint64_t)pDescriptorSets[i]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
                                            "pipelineLayout due to: %s",
                                            i + firstSet, errorString.c_str());
                    }
                    if (pSet->pLayout->dynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
                            skipCall |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (%#" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else { // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skipCall |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skipCall |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                }
                            }
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
                        }
                    }
                } else {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
                                        (uint64_t)pDescriptorSets[i]);
                }
            }
            // Record the command once for the entire bind rather than once per set
            skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
            if (firstSet > 0) { // Check set #s below the first bound set
                for (uint32_t i = 0; i < firstSet; ++i) {
                    if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                        !verify_set_layout_compatibility(
                            dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
                            errorString)) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                            "DescriptorSet %#" PRIxLEAST64
                            " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                    }
                }
            }
            // Check if newly last bound set invalidates any remaining bound sets
            if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                if (oldFinalBoundSet &&
                    !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
                                                     errorString)) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
                                DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64
                                                      " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
                                                      " newly bound as set #%u so set #%u and any subsequent sets were "
                                                      "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
                                (uint64_t)oldFinalBoundSet, lastSetIndex,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                lastSetIndex + 1, (uint64_t)layout);
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                }
            }
            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                                    "is %u. It should exactly match the number of dynamic descriptors.",
                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
            }
            // Save dynamicOffsets bound to this CB
            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.emplace_back(pDynamicOffsets[i]);
            }
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}

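// Validate vkCmdBindIndexBuffer(): the backing memory must be valid at execution
// time and 'offset' must be aligned to the index size (2 bytes for
// VK_INDEX_TYPE_UINT16, 4 bytes for VK_INDEX_TYPE_UINT32), so e.g. an offset of
// 6 is legal for UINT16 indices but not for UINT32.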
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                offset, string_VkIndexType(indexType));
        }
        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}

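// Record the vertex buffers bound at [firstBinding, firstBinding + bindingCount)
// in the command buffer's current draw data; updateResourceTrackingOnDraw()
// below snapshots that state each time a draw command is recorded.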
void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
                                                                  const VkDeviceSize *pOffsets) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    for (uint32_t i = 0; i < bindingCount; ++i) {
        VkDeviceMemory mem;
        skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
        if (cb_data != dev_data->commandBufferMap.end()) {
            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
            cb_data->second->validate_functions.push_back(function);
        }
    }
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
    } else {
        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}

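// For each storage image and buffer written by this command buffer, queue a
// deferred callback that marks the backing memory valid once the buffer executes.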
/* expects globalLock to be held by caller */
static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;

    for (auto imageView : pCB->updateImages) {
        auto iv_data = dev_data->imageViewMap.find(imageView);
        if (iv_data == dev_data->imageViewMap.end())
            continue;
        VkImage image = iv_data->second.image;
        VkDeviceMemory mem;
        skip_call |=
            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        VkDeviceMemory mem;
        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    return skip_call;
}

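// The vkCmdDraw*() entry points below share one pattern: record the command,
// bump the per-type draw count, run graphics draw-state validation, mark written
// storage resources, and snapshot the currently bound vertex buffers for the
// draw. The indirect variants additionally validate the indirect buffer's
// memory binding.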
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                                     uint32_t firstVertex, uint32_t firstInstance) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        pCB->drawCount[DRAW]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                                            uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
        pCB->drawCount[DRAW_INDEXED]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
                                                        firstInstance);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    // MTMTODO : merge with code below
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
        pCB->drawCount[DRAW_INDIRECT]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    // MTMTODO : merge with code below
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skipCall |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skipCall) {
            updateResourceTrackingOnDraw(pCB);
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}

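// vkCmdDispatch()/vkCmdDispatchIndirect() must be recorded outside a render
// pass; compute draw-state validation is still TODO, so only shader storage
// image/buffer tracking is updated here.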
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
        // skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
        // TODO : Call below is temporary until call above can be re-enabled
        update_shader_storage_images_and_buffers(dev_data, pCB);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
        // skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
        // TODO : Call below is temporary until call above can be re-enabled
        update_shader_storage_images_and_buffers(dev_data, pCB);
        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
}

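// The transfer commands below must be recorded outside a render pass. Under
// MTMERGESOURCE they also queue memory-validity callbacks (sources must contain
// valid data, destinations become valid) and check TRANSFER_SRC/DST usage flags.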
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
    // Validate that SRC & DST buffers have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}

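// Check that the tracked layout of every source subresource matches
// srcImageLayout and that it is TRANSFER_SRC_OPTIMAL (GENERAL is allowed but
// only earns a performance warning); untracked subresources adopt srcImageLayout.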
static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
                                    VkImageLayout srcImageLayout) {
    bool skip_call = false;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, srcImage, sub, node)) {
            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                                                                        "when its current layout is %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(srcImageLayout));
        }
    }
    return skip_call;
}

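// Mirror of VerifySourceImageLayout() for the destination image: layouts must
// match destImageLayout and be TRANSFER_DST_OPTIMAL (or GENERAL with a warning).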
static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
                                  VkImageLayout destImageLayout) {
    bool skip_call = false;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, destImage, sub, node)) {
            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout is %s "
                                                                        "when its current layout is %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(destImageLayout));
        }
    }
    return skip_call;
}

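// vkCmdCopyImage() additionally verifies the declared source and destination
// layouts against the per-subresource layouts tracked for each region.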
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    // Validate that src & dst images have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    // Validate that src & dst images have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions, filter);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    // Validate that src buff & dst image have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage);
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    // Validate that dst buff & src image have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}

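// TODO : vkCmdUpdateBuffer() should also validate that dstOffset and dataSize
// are multiples of 4 and that dataSize is at most 65536, per the spec limits.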
7895VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7896                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7897    bool skipCall = false;
7898    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7899    loader_platform_thread_lock_mutex(&globalLock);
7900#if MTMERGESOURCE
7901    VkDeviceMemory mem;
7902    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7903    skipCall =
7904        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7905    if (cb_data != dev_data->commandBufferMap.end()) {
7906        std::function<bool()> function = [=]() {
7907            set_memory_valid(dev_data, mem, true);
7908            return false;
7909        };
7910        cb_data->second->validate_functions.push_back(function);
7911    }
7912    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7913    // Validate that dst buff has correct usage flags set
7914    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7915                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7916#endif
7917    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7918    if (pCB) {
7919        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7920        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyUpdateBuffer");
7921    }
7922    loader_platform_thread_unlock_mutex(&globalLock);
7923    if (!skipCall)
7924        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7925}
7926
7927VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7928vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7929    bool skipCall = false;
7930    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7931    loader_platform_thread_lock_mutex(&globalLock);
7932#if MTMERGESOURCE
7933    VkDeviceMemory mem;
7934    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7935    skipCall =
7936        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7937    if (cb_data != dev_data->commandBufferMap.end()) {
7938        std::function<bool()> function = [=]() {
7939            set_memory_valid(dev_data, mem, true);
7940            return false;
7941        };
7942        cb_data->second->validate_functions.push_back(function);
7943    }
7944    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7945    // Validate that dst buff has correct usage flags set
7946    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7947                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7948#endif
7949    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7950    if (pCB) {
7951        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
7952        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyFillBuffer");
7953    }
7954    loader_platform_thread_unlock_mutex(&globalLock);
7955    if (!skipCall)
7956        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7957}
7958
7959VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7960                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
7961                                                                 const VkClearRect *pRects) {
7962    bool skipCall = false;
7963    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7964    loader_platform_thread_lock_mutex(&globalLock);
7965    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7966    if (pCB) {
7967        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7968        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
7969        if (!hasDrawCmd(pCB) && (rectCount > 0) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
7970            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7971            // TODO : commandBuffer should be srcObj
7972            // There are times when an app legitimately needs to use ClearAttachments (generally when reusing a buffer
7973            // inside of a render pass). Can we make this warning more specific? We would like to avoid triggering it
7974            // when we can tell the use is one that must call CmdClearAttachments.
7975            // Otherwise this seems more like a performance warning.
7976            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7977                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7978                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7979                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7980                                (uint64_t)(commandBuffer));
7981        }
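        // A minimal (hypothetical) recording that would trigger the warning above,
        // assuming cb is inside a render pass whose renderArea matches fullRect:
        //     VkClearAttachment att = {VK_IMAGE_ASPECT_COLOR_BIT, 0, clearValue};
        //     VkClearRect fullRect = {{{0, 0}, renderArea.extent}, 0, 1};
        //     vkCmdClearAttachments(cb, 1, &att, 1, &fullRect); // no prior draws
        // Using VK_ATTACHMENT_LOAD_OP_CLEAR on the attachment avoids the extra clear.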
7982        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7983    }
7984
7985    // Validate that attachment is in reference list of active subpass
7986    if (pCB && pCB->activeRenderPass) {
7987        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
7988        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7989
7990        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7991            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7992            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7993                bool found = false;
7994                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7995                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7996                        found = true;
7997                        break;
7998                    }
7999                }
8000                if (!found) {
8001                    skipCall |= log_msg(
8002                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8003                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8004                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
8005                        attachment->colorAttachment, pCB->activeSubpass);
8006                }
8007            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8008                if (!pSD->pDepthStencilAttachment || // No DS attachment at all in active subpass
8009                    (pSD->pDepthStencilAttachment->attachment ==
8010                     VK_ATTACHMENT_UNUSED)) { // DS attachment present but marked unused
8011
8012                    skipCall |= log_msg(
8013                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8014                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8015                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
8016                        "in active subpass %d",
8017                        attachment->colorAttachment,
8018                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
8019                        pCB->activeSubpass);
8020                }
8021            }
8022        }
8023    }
8024    loader_platform_thread_unlock_mutex(&globalLock);
8025    if (!skipCall)
8026        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8027}
8028
8029VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8030                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
8031                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8032    bool skipCall = false;
8033    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8034    loader_platform_thread_lock_mutex(&globalLock);
8035#if MTMERGESOURCE
8036    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8037    VkDeviceMemory mem;
8038    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8039    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8040    if (cb_data != dev_data->commandBufferMap.end()) {
8041        std::function<bool()> function = [=]() {
8042            set_memory_valid(dev_data, mem, true, image);
8043            return false;
8044        };
8045        cb_data->second->validate_functions.push_back(function);
8046    }
8047    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
8048#endif
8049    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8050    if (pCB) {
8051        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8052        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
8053    }
8054    loader_platform_thread_unlock_mutex(&globalLock);
8055    if (!skipCall)
8056        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8057}
8058
8059VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8060vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8061                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8062                            const VkImageSubresourceRange *pRanges) {
8063    bool skipCall = false;
8064    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8065    loader_platform_thread_lock_mutex(&globalLock);
8066#if MTMERGESOURCE
8067    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8068    VkDeviceMemory mem;
8069    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8070    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8071    if (cb_data != dev_data->commandBufferMap.end()) {
8072        std::function<bool()> function = [=]() {
8073            set_memory_valid(dev_data, mem, true, image);
8074            return false;
8075        };
8076        cb_data->second->validate_functions.push_back(function);
8077    }
8078    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
8079#endif
8080    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8081    if (pCB) {
8082        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8083        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
8084    }
8085    loader_platform_thread_unlock_mutex(&globalLock);
8086    if (!skipCall)
8087        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
8088                                                                   pRanges);
8089}
8090
8091VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8092vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8093                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8094    bool skipCall = false;
8095    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8096    loader_platform_thread_lock_mutex(&globalLock);
8097#if MTMERGESOURCE
8098    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8099    VkDeviceMemory mem;
8100    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8101    if (cb_data != dev_data->commandBufferMap.end()) {
8102        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
8103        cb_data->second->validate_functions.push_back(function);
8104    }
8105    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8106    skipCall |=
8107        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8108    if (cb_data != dev_data->commandBufferMap.end()) {
8109        std::function<bool()> function = [=]() {
8110            set_memory_valid(dev_data, mem, true, dstImage);
8111            return false;
8112        };
8113        cb_data->second->validate_functions.push_back(function);
8114    }
8115    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8116#endif
8117    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8118    if (pCB) {
8119        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8120        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
8121    }
8122    loader_platform_thread_unlock_mutex(&globalLock);
8123    if (!skipCall)
8124        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8125                                                         regionCount, pRegions);
8126}
8127
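// setEventStageMask is not invoked directly at record time for queue state; it is
// bound into pCB->eventUpdates (see vkCmdSetEvent/vkCmdResetEvent below) and then
// called with the actual queue at submit time, recording the stageMask last used
// to set each event. It never fails, hence the unconditional 'return false'.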
8128bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8129    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8130    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8131    if (pCB) {
8132        pCB->eventToStageMap[event] = stageMask;
8133    }
8134    auto queue_data = dev_data->queueMap.find(queue);
8135    if (queue_data != dev_data->queueMap.end()) {
8136        queue_data->second.eventToStageMap[event] = stageMask;
8137    }
8138    return false;
8139}
8140
8141VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8142vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8143    bool skipCall = false;
8144    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8145    loader_platform_thread_lock_mutex(&globalLock);
8146    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8147    if (pCB) {
8148        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8149        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
8150        pCB->events.push_back(event);
8151        std::function<bool(VkQueue)> eventUpdate =
8152            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8153        pCB->eventUpdates.push_back(eventUpdate);
8154    }
8155    loader_platform_thread_unlock_mutex(&globalLock);
8156    if (!skipCall)
8157        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
8158}
8159
8160VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8161vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8162    bool skipCall = false;
8163    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8164    loader_platform_thread_lock_mutex(&globalLock);
8165    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8166    if (pCB) {
8167        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8168        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
8169        pCB->events.push_back(event);
8170        std::function<bool(VkQueue)> eventUpdate =
8171            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8172        pCB->eventUpdates.push_back(eventUpdate);
8173    }
8174    loader_platform_thread_unlock_mutex(&globalLock);
8175    if (!skipCall)
8176        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
8177}
8178
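// TransitionImageLayouts records the layout transitions requested by a set of
// image memory barriers into the command buffer's per-subresource layout map.
// For each barrier it resolves VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS,
// then for every (level, layer) pair either seeds the first-seen old/new layout
// pair or checks that oldLayout matches the layout recorded so far, reporting
// DRAWSTATE_INVALID_IMAGE_LAYOUT on a mismatch before updating to newLayout.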
8179static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8180                                   const VkImageMemoryBarrier *pImgMemBarriers) {
8181    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8182    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8183    bool skip = false;
8184    uint32_t levelCount = 0;
8185    uint32_t layerCount = 0;
8186
8187    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8188        auto mem_barrier = &pImgMemBarriers[i];
8189        if (!mem_barrier)
8190            continue;
8191        // TODO: Do not iterate over every possibility - consolidate where
8192        // possible
8193        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8194
8195        for (uint32_t j = 0; j < levelCount; j++) {
8196            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8197            for (uint32_t k = 0; k < layerCount; k++) {
8198                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8199                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8200                IMAGE_CMD_BUF_LAYOUT_NODE node;
8201                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8202                    SetLayout(pCB, mem_barrier->image, sub,
8203                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8204                    continue;
8205                }
8206                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8207                    // TODO: Set memory invalid which is in mem_tracker currently
8208                } else if (node.layout != mem_barrier->oldLayout) {
8209                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8210                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8211                                                                                    "when current layout is %s.",
8212                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8213                }
8214                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8215            }
8216        }
8217    }
8218    return skip;
8219}
8220
8221// Print readable FlagBits in FlagMask
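// For example (assuming the standard VkAccessFlagBits values), an accessMask of
// (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT) is rendered as
// "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT]", and 0 as "[None]".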
8222static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8223    std::string result;
8224    std::string separator;
8225
8226    if (accessMask == 0) {
8227        result = "[None]";
8228    } else {
8229        result = "[";
8230        for (uint32_t i = 0; i < 32; i++) {
8231            if (accessMask & (1u << i)) {
8232                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8233                separator = " | ";
8234            }
8235        }
8236        result = result + "]";
8237    }
8238    return result;
8239}
8240
8241// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8242// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8243// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
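// Example: for VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL the caller below passes
// required_bit = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT and
// optional_bits = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, so an accessMask of 0
// draws the "must have required access bit" warning, while unrelated extra bits
// draw the "Additional bits" warning.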
8244static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8245                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8246                             const char *type) {
8247    bool skip_call = false;
8248
8249    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8250        if (accessMask & ~(required_bit | optional_bits)) {
8251            // TODO: Verify against Valid Use
8252            skip_call |=
8253                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8254                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8255                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8256        }
8257    } else {
8258        if (!required_bit) {
8259            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8260                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8261                                                                  "%s when layout is %s, unless the app has previously added a "
8262                                                                  "barrier for this transition.",
8263                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8264                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8265        } else {
8266            std::string opt_bits;
8267            if (optional_bits != 0) {
8268                std::stringstream ss;
8269                ss << optional_bits;
8270                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8271            }
8272            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8273                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8274                                                                  "layout is %s, unless the app has previously added a barrier for "
8275                                                                  "this transition.",
8276                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8277                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8278        }
8279    }
8280    return skip_call;
8281}
8282
8283static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8284                                        const VkImageLayout &layout, const char *type) {
8285    bool skip_call = false;
8286    switch (layout) {
8287    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8288        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8289                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8290        break;
8291    }
8292    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8293        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8294                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8295        break;
8296    }
8297    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8298        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8299        break;
8300    }
8301    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8302        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8303        break;
8304    }
8305    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8306        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8307                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8308        break;
8309    }
8310    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8311        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8312                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8313        break;
8314    }
8315    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8316        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8317        break;
8318    }
8319    case VK_IMAGE_LAYOUT_UNDEFINED: {
8320        if (accessMask != 0) {
8321            // TODO: Verify against Valid Use section spec
8322            skip_call |=
8323                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8324                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8325                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8326        }
8327        break;
8328    }
8329    case VK_IMAGE_LAYOUT_GENERAL:
8330    default: { break; }
8331    }
8332    return skip_call;
8333}
8334
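// ValidateBarriers performs the record-time checks shared by vkCmdWaitEvents and
// vkCmdPipelineBarrier: barriers inside a render pass require a subpass
// self-dependency, image barriers must follow the queue-family ownership rules of
// their sharingMode and stay within the image's mip/layer counts, access masks
// must be consistent with the old/new layouts, and buffer barriers must not be
// used inside a render pass or reference a range beyond the buffer's size.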
8335static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8336                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8337                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8338                             const VkImageMemoryBarrier *pImageMemBarriers) {
8339    bool skip_call = false;
8340    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8341    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8342    if (pCB->activeRenderPass && memBarrierCount) {
8343        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
8344            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8345                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8346                                                                  "with no self dependency specified.",
8347                                 funcName, pCB->activeSubpass);
8348        }
8349    }
8350    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8351        auto mem_barrier = &pImageMemBarriers[i];
8352        auto image_data = dev_data->imageMap.find(mem_barrier->image);
8353        if (image_data != dev_data->imageMap.end()) {
8354            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8355            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8356            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8357                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8358                // be VK_QUEUE_FAMILY_IGNORED
8359                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8360                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8361                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8362                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8363                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
8364                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8365                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8366                }
8367            } else {
8368                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8369                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8370                // or both be a valid queue family
8371                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8372                    (src_q_f_index != dst_q_f_index)) {
8373                    skip_call |=
8374                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8375                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8376                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8377                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8378                                                                     "must be.",
8379                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8380                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8381                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8382                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8383                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8384                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8385                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8386                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8387                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8388                                         "queueFamilies crated for this device.",
8389                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8390                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
8391                }
8392            }
8393        }
8394
8395        if (mem_barrier) {
8396            skip_call |=
8397                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8398            skip_call |=
8399                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8400            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8401                skip_call |=
8402                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8403                            DRAWSTATE_INVALID_BARRIER, "DS",
8404                            "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
8405            }
8406            auto image_data = dev_data->imageMap.find(mem_barrier->image);
8407            VkFormat format = VK_FORMAT_UNDEFINED;
8408            uint32_t arrayLayers = 0, mipLevels = 0;
8409            bool imageFound = false;
8410            if (image_data != dev_data->imageMap.end()) {
8411                format = image_data->second.createInfo.format;
8412                arrayLayers = image_data->second.createInfo.arrayLayers;
8413                mipLevels = image_data->second.createInfo.mipLevels;
8414                imageFound = true;
8415            } else if (dev_data->device_extensions.wsi_enabled) {
8416                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
8417                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
8418                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
8419                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
8420                        format = swapchain_data->second->createInfo.imageFormat;
8421                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
8422                        mipLevels = 1;
8423                        imageFound = true;
8424                    }
8425                }
8426            }
8427            if (imageFound) {
8428                if (vk_format_is_depth_and_stencil(format) &&
8429                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
8430                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
8431                    skip_call |=
8432                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8433                                __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8434                                "%s: Image is a depth and stencil format and thus must have both VK_IMAGE_ASPECT_DEPTH_BIT "
8435                                "and VK_IMAGE_ASPECT_STENCIL_BIT set.", funcName);
8436                }
8437                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8438                                     ? 1
8439                                     : mem_barrier->subresourceRange.layerCount;
8440                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8441                    skip_call |=
8442                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8443                                __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8444                                "%s: Subresource must have the sum of the baseArrayLayer (%d) and layerCount (%d) be less "
8445                                "than or equal to the total number of layers (%d).", funcName,
8446                                mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount, arrayLayers);
8447                }
8448                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8449                                     ? 1
8450                                     : mem_barrier->subresourceRange.levelCount;
8451                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8452                    skip_call |=
8453                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8454                                __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8455                                "%s: Subresource must have the sum of the baseMipLevel (%d) and levelCount (%d) be less "
8456                                "than or equal to the total number of levels (%d).", funcName,
8457                                mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount, mipLevels);
8458                }
8459            }
8460        }
8461    }
8462    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8463        auto mem_barrier = &pBufferMemBarriers[i];
8464        if (pCB->activeRenderPass) {
8465            skip_call |=
8466                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8467                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8468        }
8469        if (!mem_barrier)
8470            continue;
8471
8472        // Validate buffer barrier queue family indices
8473        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8474             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8475            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8476             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
8477            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8478                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8479                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8480                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8481                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8482                                 dev_data->phys_dev_properties.queue_family_properties.size());
8483        }
8484
8485        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
8486        if (buffer_data != dev_data->bufferMap.end()) {
8487            VkDeviceSize buffer_size = (buffer_data->second.createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
8488                                           ? buffer_data->second.createInfo.size
8489                                           : 0;
8490            if (mem_barrier->offset >= buffer_size) {
8491                skip_call |= log_msg(
8492                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8493                    DRAWSTATE_INVALID_BARRIER, "DS",
8494                    "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " which is not less than total size %" PRIu64 ".",
8495                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8496                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
8497            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8498                skip_call |= log_msg(
8499                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8500                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
8501                                                     " whose sum is greater than total size %" PRIu64 ".",
8502                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8503                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
8504                    reinterpret_cast<const uint64_t &>(buffer_size));
8505            }
8506        }
8507    }
8508    return skip_call;
8509}
8510
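// validateEventStageMask runs at queue-submit time (it is bound into
// pCB->eventUpdates by vkCmdWaitEvents below): it ORs the stageMask last recorded
// for each waited event, preferring per-queue state over the global event map,
// and requires the result to equal the srcStageMask that was passed to
// vkCmdWaitEvents.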
8511bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
8512    bool skip_call = false;
8513    VkPipelineStageFlags stageMask = 0;
8514    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8515    for (uint32_t i = 0; i < eventCount; ++i) {
8516        auto event = pCB->events[firstEventIndex + i];
8517        auto queue_data = dev_data->queueMap.find(queue);
8518        if (queue_data == dev_data->queueMap.end())
8519            return false;
8520        auto event_data = queue_data->second.eventToStageMap.find(event);
8521        if (event_data != queue_data->second.eventToStageMap.end()) {
8522            stageMask |= event_data->second;
8523        } else {
8524            auto global_event_data = dev_data->eventMap.find(event);
8525            if (global_event_data == dev_data->eventMap.end()) {
8526                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8527                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8528                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8529                                     reinterpret_cast<const uint64_t &>(event));
8530            } else {
8531                stageMask |= global_event_data->second.stageMask;
8532            }
8533        }
8534    }
8535    if (sourceStageMask != stageMask) {
8536        skip_call |=
8537            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8538                    DRAWSTATE_INVALID_EVENT, "DS",
8539                    "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
8540                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
8541                    sourceStageMask);
8542    }
8543    return skip_call;
8544}
8545
8546VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8547vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8548                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8549                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8550                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8551    bool skipCall = false;
8552    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8553    loader_platform_thread_lock_mutex(&globalLock);
8554    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8555    if (pCB) {
8556        auto firstEventIndex = pCB->events.size();
8557        for (uint32_t i = 0; i < eventCount; ++i) {
8558            pCB->waitedEvents.push_back(pEvents[i]);
8559            pCB->events.push_back(pEvents[i]);
8560        }
8561        std::function<bool(VkQueue)> eventUpdate =
8562            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
8563        pCB->eventUpdates.push_back(eventUpdate);
8564        if (pCB->state == CB_RECORDING) {
8565            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8566        } else {
8567            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8568        }
8569        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8570        skipCall |=
8571            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8572                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8573    }
8574    loader_platform_thread_unlock_mutex(&globalLock);
8575    if (!skipCall)
8576        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8577                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8578                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8579}
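
// A matching set/wait pair, sketched with illustrative names (evt, cb):
//     vkCmdSetEvent(cb, evt, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdWaitEvents(cb, 1, &evt, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, NULL, 0, NULL, 0, NULL);
// Here srcStageMask equals the OR of the stageMasks used in vkCmdSetEvent, which is
// exactly what validateEventStageMask verifies at submit time (events set from the
// host contribute VK_PIPELINE_STAGE_HOST_BIT).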
8580
8581VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8582vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8583                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8584                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8585                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8586    bool skipCall = false;
8587    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8588    loader_platform_thread_lock_mutex(&globalLock);
8589    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8590    if (pCB) {
8591        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8592        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8593        skipCall |=
8594            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8595                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8596    }
8597    loader_platform_thread_unlock_mutex(&globalLock);
8598    if (!skipCall)
8599        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8600                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8601                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8602}
8603
8604VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8605vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8606    bool skipCall = false;
8607    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8608    loader_platform_thread_lock_mutex(&globalLock);
8609    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8610    if (pCB) {
8611        QueryObject query = {queryPool, slot};
8612        pCB->activeQueries.insert(query);
8613        if (!pCB->startedQueries.count(query)) {
8614            pCB->startedQueries.insert(query);
8615        }
8616        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8617    }
8618    loader_platform_thread_unlock_mutex(&globalLock);
8619    if (!skipCall)
8620        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8621}
8622
8623VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8624    bool skipCall = false;
8625    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8626    loader_platform_thread_lock_mutex(&globalLock);
8627    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8628    if (pCB) {
8629        QueryObject query = {queryPool, slot};
8630        if (!pCB->activeQueries.count(query)) {
8631            skipCall |=
8632                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8633                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
8634                        (uint64_t)(queryPool), slot);
8635        } else {
8636            pCB->activeQueries.erase(query);
8637        }
8638        pCB->queryToStateMap[query] = 1;
8639        if (pCB->state == CB_RECORDING) {
8640            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8641        } else {
8642            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8643        }
8644    }
8645    loader_platform_thread_unlock_mutex(&globalLock);
8646    if (!skipCall)
8647        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8648}
8649
8650VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8651vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8652    bool skipCall = false;
8653    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8654    loader_platform_thread_lock_mutex(&globalLock);
8655    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8656    if (pCB) {
8657        for (uint32_t i = 0; i < queryCount; i++) {
8658            QueryObject query = {queryPool, firstQuery + i};
8659            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8660            pCB->queryToStateMap[query] = 0;
8661        }
8662        if (pCB->state == CB_RECORDING) {
8663            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8664        } else {
8665            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8666        }
8667        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdQueryPool");
8668    }
8669    loader_platform_thread_unlock_mutex(&globalLock);
8670    if (!skipCall)
8671        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8672}
8673
8674VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8675vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8676                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8677    bool skipCall = false;
8678    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8679    loader_platform_thread_lock_mutex(&globalLock);
8680    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8681#if MTMERGESOURCE
8682    VkDeviceMemory mem;
8683    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8684    skipCall |=
8685        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8686    if (cb_data != dev_data->commandBufferMap.end()) {
8687        std::function<bool()> function = [=]() {
8688            set_memory_valid(dev_data, mem, true);
8689            return false;
8690        };
8691        cb_data->second->validate_functions.push_back(function);
8692    }
8693    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8694    // Validate that DST buffer has correct usage flags set
8695    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8696                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8697#endif
8698    if (pCB) {
8699        for (uint32_t i = 0; i < queryCount; i++) {
8700            QueryObject query = {queryPool, firstQuery + i};
8701            if (!pCB->queryToStateMap[query]) {
8702                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8703                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8704                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
8705                                    (uint64_t)(queryPool), firstQuery + i);
8706            }
8707        }
8708        if (pCB->state == CB_RECORDING) {
8709            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8710        } else {
8711            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8712        }
8713        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8714    }
8715    loader_platform_thread_unlock_mutex(&globalLock);
8716    if (!skipCall)
8717        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8718                                                                 dstOffset, stride, flags);
8719}
8720
8721VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8722                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8723                                                              const void *pValues) {
8724    bool skipCall = false;
8725    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8726    loader_platform_thread_lock_mutex(&globalLock);
8727    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8728    if (pCB) {
8729        if (pCB->state == CB_RECORDING) {
8730            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8731        } else {
8732            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8733        }
8734    }
8735    if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) {
8736        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
8737    }
8738    // TODO : Add warning if push constant update doesn't align with range
8739    loader_platform_thread_unlock_mutex(&globalLock);
8740    if (!skipCall)
8741        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8742}
8743
8744VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8745vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8746    bool skipCall = false;
8747    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8748    loader_platform_thread_lock_mutex(&globalLock);
8749    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8750    if (pCB) {
8751        QueryObject query = {queryPool, slot};
8752        pCB->queryToStateMap[query] = 1;
8753        if (pCB->state == CB_RECORDING) {
8754            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8755        } else {
8756            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8757        }
8758    }
8759    loader_platform_thread_unlock_mutex(&globalLock);
8760    if (!skipCall)
8761        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8762}
8763
8764VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8765                                                                   const VkAllocationCallbacks *pAllocator,
8766                                                                   VkFramebuffer *pFramebuffer) {
8767    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8768    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8769    if (VK_SUCCESS == result) {
8770        // Shadow create info and store in map
8771        loader_platform_thread_lock_mutex(&globalLock);
8772
8773        auto &fbNode = dev_data->frameBufferMap[*pFramebuffer];
8774        fbNode.createInfo = *pCreateInfo;
8775        if (pCreateInfo->pAttachments) {
8776            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
8777            memcpy(attachments,
8778                   pCreateInfo->pAttachments,
8779                   pCreateInfo->attachmentCount * sizeof(VkImageView));
8780            fbNode.createInfo.pAttachments = attachments;
8781        }
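        // The attachment list is deep-copied because the application's
        // pAttachments array is only required to live for the duration of this
        // call; the shadow copy presumably remains valid until the framebuffer
        // node is destroyed.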
8782        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8783            VkImageView view = pCreateInfo->pAttachments[i];
8784            auto view_data = dev_data->imageViewMap.find(view);
8785            if (view_data == dev_data->imageViewMap.end()) {
8786                continue;
8787            }
8788            MT_FB_ATTACHMENT_INFO fb_info;
8789            get_mem_binding_from_object(dev_data, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8790                                        &fb_info.mem);
8791            fb_info.image = view_data->second.image;
8792            fbNode.attachments.push_back(fb_info);
8793        }
8794
8795        loader_platform_thread_unlock_mutex(&globalLock);
8796    }
8797    return result;
8798}
8799
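// FindDependency is a depth-first search over the subpass DAG: starting at
// 'index' it walks prev edges looking for 'dependent', memoizing visited nodes
// in processed_nodes so shared ancestors are expanded only once.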
8800static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8801                           std::unordered_set<uint32_t> &processed_nodes) {
8802    // If this node has already been checked, no new dependency path can be found through it, so return false.
8803    if (processed_nodes.count(index))
8804        return false;
8805    processed_nodes.insert(index);
8806    const DAGNode &node = subpass_to_node[index];
8807    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
8808    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8809        for (auto elem : node.prev) {
8810            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
8811                return true;
8812        }
8813    } else {
8814        return true;
8815    }
8816    return false;
8817}
8818
8819static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
8820                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
8821    bool result = true;
8822    // Loop through all subpasses that share the same attachment and make sure a dependency exists
8823    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8824        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
8825            continue;
8826        const DAGNode &node = subpass_to_node[subpass];
8827        // Check for a specified dependency between the two nodes. If one exists we are done.
8828        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8829        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8830        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit dependency path still might. If so, warn; if not, report an error.
8832            std::unordered_set<uint32_t> processed_nodes;
8833            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8834                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
8835                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8836                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8837                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
8838                                     subpass, dependent_subpasses[k]);
8839            } else {
8840                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8841                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8842                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8843                                     dependent_subpasses[k]);
8844                result = false;
8845            }
8846        }
8847    }
8848    return result;
8849}
8850
8851static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8852                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
8853    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, as subsequent nodes need to preserve the attachment.
8855    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8856    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8857        if (attachment == subpass.pColorAttachments[j].attachment)
8858            return true;
8859    }
8860    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8861        if (attachment == subpass.pDepthStencilAttachment->attachment)
8862            return true;
8863    }
8864    bool result = false;
8865    // Loop through previous nodes and see if any of them write to the attachment.
8866    for (auto elem : node.prev) {
8867        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
8868    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
8870    if (result && depth > 0) {
8871        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8872        bool has_preserved = false;
8873        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8874            if (subpass.pPreserveAttachments[j] == attachment) {
8875                has_preserved = true;
8876                break;
8877            }
8878        }
8879        if (!has_preserved) {
8880            skip_call |=
8881                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8882                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8883                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8884        }
8885    }
8886    return result;
8887}
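
// Illustrative sketch (editor-added; indices are hypothetical): with
// dependencies 0 -> 1 and 1 -> 2, if subpass 0 writes attachment 2 and
// subpass 2 reads it as an input attachment, the intervening subpass 1 must
// list the attachment as preserved or CheckPreserved() reports an error:
//
//     uint32_t preserve[] = {2};
//     VkSubpassDescription middle = {};
//     middle.preserveAttachmentCount = 1;
//     middle.pPreserveAttachments = preserve;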
8888
// Ranges are half-open: [offset, offset + size). Two ranges overlap iff each one starts before the other ends.
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (offset1 < (offset2 + size2)) && ((offset1 + size1) > offset2);
}
8893
8894bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8895    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8896            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8897}
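
// Worked example (editor-added): the ranges are half-open, so baseMipLevel 0 /
// levelCount 2 covers levels [0, 2) and baseMipLevel 1 / levelCount 2 covers
// [1, 3); these overlap because 0 < 1 + 2 and 0 + 2 > 1. Ranges [0, 2) and
// [2, 4) do not overlap because 0 + 2 > 2 is false.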
8898
8899static bool ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
8900                                 const std::vector<DAGNode> &subpass_to_node) {
8901    bool skip_call = false;
8902    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
8903    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
8904    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8905    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8906    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8907    // Find overlapping attachments
8908    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8909        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8910            VkImageView viewi = pFramebufferInfo->pAttachments[i];
8911            VkImageView viewj = pFramebufferInfo->pAttachments[j];
8912            if (viewi == viewj) {
8913                overlapping_attachments[i].push_back(j);
8914                overlapping_attachments[j].push_back(i);
8915                continue;
8916            }
8917            auto view_data_i = my_data->imageViewMap.find(viewi);
8918            auto view_data_j = my_data->imageViewMap.find(viewj);
8919            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
8920                continue;
8921            }
8922            if (view_data_i->second.image == view_data_j->second.image &&
8923                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
8924                overlapping_attachments[i].push_back(j);
8925                overlapping_attachments[j].push_back(i);
8926                continue;
8927            }
8928            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
8929            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
8930            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
8931                continue;
8932            }
8933            if (image_data_i->second.mem == image_data_j->second.mem &&
8934                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
8935                                   image_data_j->second.memSize)) {
8936                overlapping_attachments[i].push_back(j);
8937                overlapping_attachments[j].push_back(i);
8938            }
8939        }
8940    }
8941    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8942        uint32_t attachment = i;
8943        for (auto other_attachment : overlapping_attachments[i]) {
8944            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8945                skip_call |=
8946                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8947                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8948                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8949                            attachment, other_attachment);
8950            }
8951            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8952                skip_call |=
8953                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8954                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8955                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8956                            other_attachment, attachment);
8957            }
8958        }
8959    }
    // For each attachment, find the subpasses that use it.
8961    unordered_set<uint32_t> attachmentIndices;
8962    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8963        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8964        attachmentIndices.clear();
8965        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8966            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8967            input_attachment_to_subpass[attachment].push_back(i);
8968            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8969                input_attachment_to_subpass[overlapping_attachment].push_back(i);
8970            }
8971        }
8972        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8973            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8974            output_attachment_to_subpass[attachment].push_back(i);
8975            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8976                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8977            }
8978            attachmentIndices.insert(attachment);
8979        }
8980        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8981            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8982            output_attachment_to_subpass[attachment].push_back(i);
8983            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8984                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8985            }
8986
8987            if (attachmentIndices.count(attachment)) {
8988                skip_call |=
8989                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8990                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8991                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
8992                            attachment, i);
8993            }
8994        }
8995    }
8996    // If there is a dependency needed make sure one exists
8997    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8998        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8999        // If the attachment is an input then all subpasses that output must have a dependency relationship
9000        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9001            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
9002            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9003        }
9004        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9005        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9006            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
9007            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9008            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9009        }
9010        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9011            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9012            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9013            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9014        }
9015    }
    // Loop through implicit dependencies: if a subpass reads an attachment, make sure the attachment is preserved
    // by every subpass between the one that wrote it and the one that reads it.
9018    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9019        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9020        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9021            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9022        }
9023    }
9024    return skip_call;
9025}
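
// Illustrative sketch (editor-added; values are hypothetical): when two
// attachments are backed by the same VkImage or by overlapping memory, both of
// their descriptions must opt in to aliasing, or ValidateDependencies() above
// reports an error:
//
//     VkAttachmentDescription desc = {};
//     desc.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT; // required on both aliases
//     desc.format = VK_FORMAT_B8G8R8A8_UNORM;
//     desc.samples = VK_SAMPLE_COUNT_1_BIT;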
9026
9027static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9028    bool skip = false;
9029
9030    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9031        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9032        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9033            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
9034                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9035                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9036                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9037                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9038                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9039                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9040                } else {
9041                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9042                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9043                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9044                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
9045                }
9046            }
9047        }
9048        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9049            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
9050                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9051                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9052                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9053                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9054                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9055                } else {
9056                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9057                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9058                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9059                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
9060                }
9061            }
9062        }
9063        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9064            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
9065                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
9066                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9067                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9068                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9069                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
9070                } else {
9071                    skip |=
9072                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9073                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9074                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
9075                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9076                }
9077            }
9078        }
9079    }
9080    return skip;
9081}
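
// Illustrative sketch (editor-added; attachment indices are hypothetical): the
// layouts ValidateLayouts() accepts silently for each attachment use:
//
//     VkAttachmentReference color = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkAttachmentReference input = {1, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
//     VkAttachmentReference depth = {2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
//
// VK_IMAGE_LAYOUT_GENERAL is also accepted for any of these uses, but only with
// a performance warning.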
9082
9083static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9084                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9085    bool skip_call = false;
9086    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9087        DAGNode &subpass_node = subpass_to_node[i];
9088        subpass_node.pass = i;
9089    }
9090    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9091        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9092        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9093            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9094            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9095                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9096                                 "Depedency graph must be specified such that an earlier pass cannot depend on a later pass.");
9097        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9098            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dst subpasses cannot both be external.");
9100        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9101            has_self_dependency[dependency.srcSubpass] = true;
9102        }
9103        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9104            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9105        }
9106        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9107            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9108        }
9109    }
9110    return skip_call;
9111}
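
// Illustrative sketch (editor-added; values are hypothetical): a dependency
// declaring that subpass 1 consumes subpass 0's color output adds 0 to
// subpass_to_node[1].prev and 1 to subpass_to_node[0].next:
//
//     VkSubpassDependency dep = {};
//     dep.srcSubpass = 0;
//     dep.dstSubpass = 1;
//     dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;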
9112
9113
9114VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9115                                                                    const VkAllocationCallbacks *pAllocator,
9116                                                                    VkShaderModule *pShaderModule) {
9117    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9118    bool skip_call = false;
9119    if (!shader_is_spirv(pCreateInfo)) {
9120        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9121                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
9122    }
9123
9124    if (skip_call)
9125        return VK_ERROR_VALIDATION_FAILED_EXT;
9126
9127    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9128
9129    if (res == VK_SUCCESS) {
9130        loader_platform_thread_lock_mutex(&globalLock);
9131        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9132        loader_platform_thread_unlock_mutex(&globalLock);
9133    }
9134    return res;
9135}
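
// A minimal sketch (editor-added assumption; the real check is shader_is_spirv(),
// defined elsewhere in this layer) of what identifying a SPIR-V module involves:
// the code must be a whole number of 32-bit words and begin with the SPIR-V
// magic number 0x07230203 (spv::MagicNumber):
//
//     bool looks_like_spirv = (pCreateInfo->codeSize >= sizeof(uint32_t)) &&
//                             (pCreateInfo->codeSize % 4 == 0) &&
//                             (pCreateInfo->pCode[0] == spv::MagicNumber);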
9136
9137VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9138                                                                  const VkAllocationCallbacks *pAllocator,
9139                                                                  VkRenderPass *pRenderPass) {
9140    bool skip_call = false;
9141    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9142    loader_platform_thread_lock_mutex(&globalLock);
9143    // Create DAG
9144    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9145    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9146    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9147    // Validate
9148    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9149    if (skip_call) {
9150        loader_platform_thread_unlock_mutex(&globalLock);
9151        return VK_ERROR_VALIDATION_FAILED_EXT;
9152    }
9153    loader_platform_thread_unlock_mutex(&globalLock);
9154    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9155    if (VK_SUCCESS == result) {
9156        loader_platform_thread_lock_mutex(&globalLock);
9157        // TODOSC : Merge in tracking of renderpass from shader_checker
9158        // Shadow create info and store in map
9159        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9160        if (pCreateInfo->pAttachments) {
9161            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9162            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9163                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9164        }
9165        if (pCreateInfo->pSubpasses) {
9166            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9167            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9168
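            // Each shadowed subpass packs all of its attachment references into a single
            // allocation laid out as:
            //   [ input | color | resolve (optional) | depth/stencil (optional) | preserve ]
            // (preserve entries are uint32_t indices stored in VkAttachmentReference-sized
            // slots, so the block is slightly over-allocated). deleteRenderPasses() frees
            // the whole block through the first non-null attachment pointer.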
9169            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9170                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9171                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9172                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9173                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9174                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9175
9176                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9177                subpass->pInputAttachments = attachments;
9178                attachments += subpass->inputAttachmentCount;
9179
9180                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9181                subpass->pColorAttachments = attachments;
9182                attachments += subpass->colorAttachmentCount;
9183
9184                if (subpass->pResolveAttachments) {
9185                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9186                    subpass->pResolveAttachments = attachments;
9187                    attachments += subpass->colorAttachmentCount;
9188                }
9189
9190                if (subpass->pDepthStencilAttachment) {
9191                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9192                    subpass->pDepthStencilAttachment = attachments;
9193                    attachments += 1;
9194                }
9195
                // pPreserveAttachments is an array of uint32_t indices, not VkAttachmentReference,
                // so copy uint32_t-sized elements to avoid over-reading the source array.
                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
9197                subpass->pPreserveAttachments = &attachments->attachment;
9198            }
9199        }
9200        if (pCreateInfo->pDependencies) {
9201            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9202            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9203                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9204        }
9205        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
9206        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
9207        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
9208#if MTMERGESOURCE
9209        // MTMTODO : Merge with code from above to eliminate duplication
9210        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9211            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9212            MT_PASS_ATTACHMENT_INFO pass_info;
9213            pass_info.load_op = desc.loadOp;
9214            pass_info.store_op = desc.storeOp;
9215            pass_info.attachment = i;
9216            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
9217        }
9218        // TODO: Maybe fill list and then copy instead of locking
9219        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
9220        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
9221            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
9222        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9223            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9224            if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9225                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9226                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9227                                     "Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
9228            }
9229            for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9230                uint32_t attachment = subpass.pPreserveAttachments[j];
9231                if (attachment >= pCreateInfo->attachmentCount) {
9232                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9233                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9234                                         "Preserve attachment %d cannot be greater than the total number of attachments %d.",
9235                                         attachment, pCreateInfo->attachmentCount);
9236                }
9237            }
9238            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9239                uint32_t attachment;
9240                if (subpass.pResolveAttachments) {
9241                    attachment = subpass.pResolveAttachments[j].attachment;
9242                    if (attachment >= pCreateInfo->attachmentCount && attachment != VK_ATTACHMENT_UNUSED) {
9243                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9244                                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9245                                             "Color attachment %d cannot be greater than the total number of attachments %d.",
9246                                             attachment, pCreateInfo->attachmentCount);
9247                        continue;
9248                    }
9249                }
9250                attachment = subpass.pColorAttachments[j].attachment;
9251                if (attachment >= pCreateInfo->attachmentCount) {
9252                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9253                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9254                                         "Color attachment %d cannot be greater than the total number of attachments %d.",
9255                                         attachment, pCreateInfo->attachmentCount);
9256                    continue;
9257                }
9258                if (attachment_first_read.count(attachment))
9259                    continue;
9260                attachment_first_read.insert(std::make_pair(attachment, false));
9261                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9262            }
9263            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9264                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9265                if (attachment >= pCreateInfo->attachmentCount) {
9266                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9267                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9268                                         "Depth stencil attachment %d cannot be greater than the total number of attachments %d.",
9269                                         attachment, pCreateInfo->attachmentCount);
9270                    continue;
9271                }
9272                if (attachment_first_read.count(attachment))
9273                    continue;
9274                attachment_first_read.insert(std::make_pair(attachment, false));
9275                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9276            }
9277            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9278                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9279                if (attachment >= pCreateInfo->attachmentCount) {
9280                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9281                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9282                                         "Input attachment %d cannot be greater than the total number of attachments %d.",
9283                                         attachment, pCreateInfo->attachmentCount);
9284                    continue;
9285                }
9286                if (attachment_first_read.count(attachment))
9287                    continue;
9288                attachment_first_read.insert(std::make_pair(attachment, true));
9289                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9290            }
9291        }
9292#endif
9293        loader_platform_thread_unlock_mutex(&globalLock);
9294    }
9295    return result;
9296}
9297// Free the renderpass shadow
9298static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
9300        return;
9301    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9302        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9303        delete[] pRenderPassInfo->pAttachments;
9304        if (pRenderPassInfo->pSubpasses) {
9305            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachment references are all allocated in one block, so we just need to
                //  find the first non-null pointer and free the block through it
9308                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9309                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9310                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9311                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9312                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9313                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9314                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9315                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9316                }
9317            }
9318            delete[] pRenderPassInfo->pSubpasses;
9319        }
9320        delete[] pRenderPassInfo->pDependencies;
9321        delete pRenderPassInfo;
9322        delete (*ii).second;
9323    }
9324    my_data->renderPassMap.clear();
9325}
9326
9327static bool VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9328    bool skip_call = false;
9329    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9330    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9331    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9332    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
9333    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9334        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "whose attachment count differs from that of the render pass.");
9337    }
9338    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9339        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9340        auto image_data = dev_data->imageViewMap.find(image_view);
9341        assert(image_data != dev_data->imageViewMap.end());
9342        const VkImage &image = image_data->second.image;
9343        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
9344        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9345                                             pRenderPassInfo->pAttachments[i].initialLayout};
9346        // TODO: Do not iterate over every possibility - consolidate where possible
9347        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9348            uint32_t level = subRange.baseMipLevel + j;
9349            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9350                uint32_t layer = subRange.baseArrayLayer + k;
9351                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9352                IMAGE_CMD_BUF_LAYOUT_NODE node;
9353                if (!FindLayout(pCB, image, sub, node)) {
9354                    SetLayout(pCB, image, sub, newNode);
9355                    continue;
9356                }
9357                if (newNode.layout != node.layout) {
9358                    skip_call |=
9359                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9360                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9361                                                                    "where the "
9362                                                                    "initial layout is %s and the layout of the attachment at the "
9363                                                                    "start of the render pass is %s. The layouts must match.",
9364                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9365                }
9366            }
9367        }
9368    }
9369    return skip_call;
9370}
9371
9372static void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
9373                                     const int subpass_index) {
9374    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9375    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9376    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9377    if (render_pass_data == dev_data->renderPassMap.end()) {
9378        return;
9379    }
9380    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9381    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9382    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9383        return;
9384    }
9385    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9386    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
9387    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9388        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9389        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9390    }
9391    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9392        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9393        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9394    }
9395    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9396        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9397        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9398    }
9399}
9400
9401static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9402    bool skip_call = false;
9403    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9404        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9405                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9406                             cmd_name.c_str());
9407    }
9408    return skip_call;
9409}
9410
9411static void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9412    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9413    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9414    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9415    if (render_pass_data == dev_data->renderPassMap.end()) {
9416        return;
9417    }
9418    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9419    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9420    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9421        return;
9422    }
9423    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9424    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9425        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9426        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9427    }
9428}
9429
9430static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9431    bool skip_call = false;
9432    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9433    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9434        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9435        pRenderPassBegin->renderArea.offset.y < 0 ||
9436        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9437        skip_call |= static_cast<bool>(log_msg(
9438            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9439            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9440            "Cannot execute a render pass with renderArea not within the bound of the "
9441            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9442            "height %d.",
9443            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9444            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9445    }
9446    return skip_call;
9447}
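
// Worked example (editor-added; dimensions are hypothetical): for a 1024x768
// framebuffer, a renderArea with offset (64, 64) and extent 960x704 passes the
// check above (64 + 960 <= 1024 and 64 + 704 <= 768), while an extent of
// 961x704 would fail in x.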
9448
9449VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9450vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9451    bool skipCall = false;
9452    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9453    loader_platform_thread_lock_mutex(&globalLock);
9454    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9455    if (pCB) {
9456        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
#if MTMERGESOURCE
9458            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9459            if (pass_data != dev_data->renderPassMap.end()) {
9460                RENDER_PASS_NODE* pRPNode = pass_data->second;
9461                pRPNode->fb = pRenderPassBegin->framebuffer;
9462                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9463                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9464                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9465                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9466                        if (cb_data != dev_data->commandBufferMap.end()) {
9467                            std::function<bool()> function = [=]() {
9468                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9469                                return false;
9470                            };
9471                            cb_data->second->validate_functions.push_back(function);
9472                        }
9473                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
9474                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9475                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9476                            skipCall |=
9477                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9478                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9479                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9480                                        pRPNode->attachments[i].attachment, attachment_layout);
9481                        }
9482                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9483                        if (cb_data != dev_data->commandBufferMap.end()) {
9484                            std::function<bool()> function = [=]() {
9485                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9486                                return false;
9487                            };
9488                            cb_data->second->validate_functions.push_back(function);
9489                        }
9490                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9491                        if (cb_data != dev_data->commandBufferMap.end()) {
9492                            std::function<bool()> function = [=]() {
9493                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9494                            };
9495                            cb_data->second->validate_functions.push_back(function);
9496                        }
9497                    }
9498                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
9499                        if (cb_data != dev_data->commandBufferMap.end()) {
9500                            std::function<bool()> function = [=]() {
9501                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9502                            };
9503                            cb_data->second->validate_functions.push_back(function);
9504                        }
9505                    }
9506                }
9507            }
9508#endif
9509            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9510            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9511            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9512            if (render_pass_data != dev_data->renderPassMap.end()) {
9513                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9514            }
9515            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9516            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9517            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9518            pCB->activeRenderPass = pRenderPassBegin->renderPass;
9519            // This is a shallow copy as that is all that is needed for now
9520            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9521            pCB->activeSubpass = 0;
9522            pCB->activeSubpassContents = contents;
9523            pCB->framebuffer = pRenderPassBegin->framebuffer;
9524            // Connect this framebuffer to this cmdBuffer
9525            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9526        } else {
9527            skipCall |=
9528                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9529                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9530        }
9531    }
9532    loader_platform_thread_unlock_mutex(&globalLock);
9533    if (!skipCall) {
9534        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9535    }
9536}
9537
9538VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9539    bool skipCall = false;
9540    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9541    loader_platform_thread_lock_mutex(&globalLock);
9542    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9543    if (pCB) {
9544        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9545        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9546        pCB->activeSubpass++;
9547        pCB->activeSubpassContents = contents;
9548        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9549        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
9550            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
9551                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
9552        }
9553        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9554    }
9555    loader_platform_thread_unlock_mutex(&globalLock);
9556    if (!skipCall)
9557        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9558}
9559
9560VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
9561    bool skipCall = false;
9562    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9563    loader_platform_thread_lock_mutex(&globalLock);
9564#if MTMERGESOURCE
9565    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9566    if (cb_data != dev_data->commandBufferMap.end()) {
9567        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
9568        if (pass_data != dev_data->renderPassMap.end()) {
9569            RENDER_PASS_NODE* pRPNode = pass_data->second;
9570            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9571                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9572                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
9573                    if (cb_data != dev_data->commandBufferMap.end()) {
9574                        std::function<bool()> function = [=]() {
9575                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9576                            return false;
9577                        };
9578                        cb_data->second->validate_functions.push_back(function);
9579                    }
9580                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9581                    if (cb_data != dev_data->commandBufferMap.end()) {
9582                        std::function<bool()> function = [=]() {
9583                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9584                            return false;
9585                        };
9586                        cb_data->second->validate_functions.push_back(function);
9587                    }
9588                }
9589            }
9590        }
9591    }
9592#endif
9593    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9594    if (pCB) {
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9596        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9597        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9598        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
9599        pCB->activeRenderPass = 0;
9600        pCB->activeSubpass = 0;
9601    }
9602    loader_platform_thread_unlock_mutex(&globalLock);
9603    if (!skipCall)
9604        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9605}
9606
9607static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9608                                        VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
9609                                        const char *msg) {
9610    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9611                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9612                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9613                   " that is not compatible with the current render pass %" PRIx64 "."
9614                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9615                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9616                   msg);
9617}
9618
9619static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9620                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9621                                            uint32_t secondaryAttach, bool is_multi) {
9622    bool skip_call = false;
9623    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9624    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9625    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9626        primaryAttach = VK_ATTACHMENT_UNUSED;
9627    }
9628    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9629        secondaryAttach = VK_ATTACHMENT_UNUSED;
9630    }
9631    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9632        return skip_call;
9633    }
9634    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9635        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9636                                                 secondaryAttach, "The first is unused while the second is not.");
9637        return skip_call;
9638    }
9639    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9640        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9641                                                 secondaryAttach, "The second is unused while the first is not.");
9642        return skip_call;
9643    }
9644    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9645        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9646        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9647                                                 secondaryAttach, "They have different formats.");
9648    }
9649    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9650        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9651        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9652                                                 secondaryAttach, "They have different samples.");
9653    }
9654    if (is_multi &&
9655        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9656            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9657        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9658                                                 secondaryAttach, "They have different flags.");
9659    }
9660    return skip_call;
9661}
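
// Illustrative note (editor-added): per the checks above, two render passes may
// differ in an attachment's load/store ops and image layouts and still be
// compatible; format and sample count must always match, and flags must match
// as well when is_multi is set by the caller.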
9662
9663static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9664                                         VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass,
9665                                         bool is_multi) {
9666    bool skip_call = false;
9667    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9668    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9669    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
9670    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
9671    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9672    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9673        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9674        if (i < primary_desc.inputAttachmentCount) {
9675            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9676        }
9677        if (i < secondary_desc.inputAttachmentCount) {
9678            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9679        }
9680        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9681                                                     secondaryPass, secondary_input_attach, is_multi);
9682    }
9683    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9684    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9685        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9686        if (i < primary_desc.colorAttachmentCount) {
9687            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9688        }
9689        if (i < secondary_desc.colorAttachmentCount) {
9690            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9691        }
9692        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9693                                                     secondaryPass, secondary_color_attach, is_multi);
9694        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9695        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9696            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9697        }
9698        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9699            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9700        }
9701        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9702                                                     secondaryPass, secondary_resolve_attach, is_multi);
9703    }
9704    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9705    if (primary_desc.pDepthStencilAttachment) {
9706        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9707    }
9708    if (secondary_desc.pDepthStencilAttachment) {
9709        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9710    }
9711    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9712                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9713    return skip_call;
9714}
9715
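// Verify that two render passes are compatible for vkCmdExecuteCommands purposes.
// Identical handles are trivially compatible; otherwise both handles must be known,
// both passes must have the same subpassCount, and every subpass must pass the
// per-attachment checks above.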
9716static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9717                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9718    bool skip_call = false;
9719    // Early exit if renderPass objects are identical (and therefore compatible)
9720    if (primaryPass == secondaryPass)
9721        return skip_call;
9722    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9723    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9724    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
9725        skip_call |=
9726            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9727                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9728                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9729                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9730        return skip_call;
9731    }
9732    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
9733        skip_call |=
9734            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9735                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9736                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9737                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9738        return skip_call;
9739    }
9740    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
9741        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9742                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9743                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9744                             " that is not compatible with the current render pass %" PRIx64 ". "
9745                             "They have a different number of subpasses.",
9746                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9747        return skip_call;
9748    }
9749    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
9750    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
9751        skip_call |=
9752            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
9753    }
9754    return skip_call;
9755}
9756
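// If the secondary command buffer inherited an explicit framebuffer, it must match
// the primary's current framebuffer, and the render pass that framebuffer was
// created with must be compatible with the render pass the secondary inherited.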
9757static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9758                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9759    bool skip_call = false;
9760    if (!pSubCB->beginInfo.pInheritanceInfo) {
9761        return skip_call;
9762    }
9763    VkFramebuffer primary_fb = pCB->framebuffer;
9764    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9765    if (secondary_fb != VK_NULL_HANDLE) {
9766        if (primary_fb != secondary_fb) {
9767            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9768                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9769                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
9770                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
9771                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9772        }
9773        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
9774        if (fb_data == dev_data->frameBufferMap.end()) {
9775            skip_call |=
9776                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9777                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9778                                                                          "which has invalid framebuffer %" PRIx64 ".",
9779                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9780            return skip_call;
9781        }
9782        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
9783                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9784    }
9785    return skip_call;
9786}
9787
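// Cross-check query state between a primary and a secondary it executes: while a
// pipeline-statistics query is active, the secondary's inherited pipelineStatistics
// may only contain bits enabled on the query pool, and the secondary must not have
// started a query of a type that is already active in the primary.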
9788static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9789    bool skipCall = false;
9790    unordered_set<int> activeTypes;
9791    for (auto queryObject : pCB->activeQueries) {
9792        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9793        if (queryPoolData != dev_data->queryPoolMap.end()) {
9794            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9795                pSubCB->beginInfo.pInheritanceInfo) {
9796                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9797                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9798                    skipCall |= log_msg(
9799                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9800                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9801                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9802                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics are being queried, so every "
9803                        "bit in this secondary's inherited pipelineStatistics must also be enabled on the queryPool.",
9804                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9805                }
9806            }
9807            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9808        }
9809    }
9810    for (auto queryObject : pSubCB->startedQueries) {
9811        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9812        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9813            skipCall |=
9814                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9815                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9816                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9817                        "which has invalid active query pool %" PRIx64 " of type %d but a query of that type has been started on "
9818                        "secondary Cmd Buffer %p.",
9819                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9820                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9821        }
9822    }
9823    return skipCall;
9824}
9825
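// vkCmdExecuteCommands: each element of pCommandBuffers must be a known secondary
// command buffer. Inside a render pass, each secondary must additionally have been
// begun with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and with a compatible
// render pass (and, if specified, the matching framebuffer). Simultaneous-use and
// inherited-query rules are enforced per-element below.
//
// Illustrative sketch (application-side usage, not part of this layer) of a
// secondary command buffer begin that satisfies the in-render-pass checks; the
// renderPass/framebuffer/secondaryCmdBuffer names are placeholders:
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = renderPass;   // compatible with the primary's active pass
//     inherit.subpass = 0;
//     inherit.framebuffer = framebuffer; // VK_NULL_HANDLE, or the primary's active FB
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondaryCmdBuffer, &begin);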
9826VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9827vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9828    bool skipCall = false;
9829    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9830    loader_platform_thread_lock_mutex(&globalLock);
9831    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9832    if (pCB) {
9833        GLOBAL_CB_NODE *pSubCB = NULL;
9834        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9835            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9836            if (!pSubCB) {
9837                skipCall |=
9838                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9839                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9840                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
9841                            (void *)pCommandBuffers[i], i);
9842            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9843                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9844                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9845                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
9846                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9847                                    (void *)pCommandBuffers[i], i);
9848            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9849                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9850                    skipCall |= log_msg(
9851                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9852                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9853                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
9854                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9855                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
9856                } else {
9857                    // With CONTINUE_BIT set, the inherited render pass must be compatible w/ the primary's active pass
9858                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
9859                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9860                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9861                }
9862                string errorString = "";
9863                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
9864                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9865                    skipCall |= log_msg(
9866                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9867                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9868                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
9869                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
9870                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9871                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
9872                }
9873                //  If framebuffer for secondary CB is not NULL, it must match the FB from the vkCmdBeginRenderPass()
9874                //   this CB executes within, AND that FB must have been created w/ an RP compatible w/ this renderpass
9875                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9876                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9877                        skipCall |= log_msg(
9878                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9879                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9880                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
9881                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
9882                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9883                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
9884                    }
9885                }
9886            }
9887            // TODO(mlentine): Move more logic into this method
9888            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9889            skipCall |= validateCommandBufferState(dev_data, pSubCB);
9890            // Secondary cmdBuffers are considered pending execution from the moment
9891            // they are recorded into a primary cmdBuffer
9892            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9893                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9894                    skipCall |= log_msg(
9895                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9896                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9897                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9898                        "set!",
9899                        (uint64_t)(pCB->commandBuffer));
9900                }
9901                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9902                    // Warn: a secondary w/o SIMULTANEOUS_USE_BIT strips that usage from the primary that executes it
9903                    skipCall |= log_msg(
9904                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9905                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9906                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
9907                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9908                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9909                                          "set, even though it does.",
9910                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9911                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9912                }
9913            }
9914            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
9915                skipCall |=
9916                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9917                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
9918                            "vkCmdExecuteCommands(): Secondary Command Buffer "
9919                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
9920                            "flight and inherited queries not "
9921                            "supported on this device.",
9922                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
9923            }
9924            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9925            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9926            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9927        }
9928        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
9929        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9930    }
9931    loader_platform_thread_unlock_mutex(&globalLock);
9932    if (!skipCall)
9933        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9934}
9935
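// Memory backing an image may only be mapped while every tracked subresource of that
// image is in VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL.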
9936static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
9937    bool skip_call = false;
9938    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9939    auto mem_data = dev_data->memObjMap.find(mem);
9940    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
9941        std::vector<VkImageLayout> layouts;
9942        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
9943            for (auto layout : layouts) {
9944                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
9945                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9946                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
9947                                                                                         "GENERAL or PREINITIALIZED are supported.",
9948                                         string_VkImageLayout(layout));
9949                }
9950            }
9951        }
9952    }
9953    return skip_call;
9954}
9955
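// vkMapMemory: require a HOST_VISIBLE memory type, validate and record the requested
// range for later flush/unmap checks, and verify bound-image layouts before passing
// the call down. On success the mapping is shadowed via initializeAndTrackMemory().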
9956VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
9957vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
9958    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9959
9960    bool skip_call = false;
9961    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9962    loader_platform_thread_lock_mutex(&globalLock);
9963#if MTMERGESOURCE
9964    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
9965    if (pMemObj) {
9966        pMemObj->valid = true;
9967        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
9968             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9969            skip_call |=
9970                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9971                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
9972                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
9973        }
9974    }
9975    skip_call |= validateMemRange(dev_data, mem, offset, size);
9976    storeMemRanges(dev_data, mem, offset, size);
9977#endif
9978    skip_call |= ValidateMapImageLayouts(device, mem);
9979    loader_platform_thread_unlock_mutex(&globalLock);
9980
9981    if (!skip_call) {
9982        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
9983#if MTMERGESOURCE
9984        loader_platform_thread_lock_mutex(&globalLock);
9985        initializeAndTrackMemory(dev_data, mem, size, ppData);
9986        loader_platform_thread_unlock_mutex(&globalLock);
9987#endif
9988    }
9989    return result;
9990}
9991
9992#if MTMERGESOURCE
9993VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
9994    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9995    bool skipCall = false;
9996
9997    loader_platform_thread_lock_mutex(&globalLock);
9998    skipCall |= deleteMemRanges(my_data, mem);
9999    loader_platform_thread_unlock_mutex(&globalLock);
10000    if (!skipCall) {
10001        my_data->device_dispatch_table->UnmapMemory(device, mem);
10002    }
10003}
10004
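// Each VkMappedMemoryRange must fall inside the range recorded at vkMapMemory time;
// flushing or invalidating bytes outside the mapped window is an error.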
10005static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10006                                   const VkMappedMemoryRange *pMemRanges) {
10007    bool skipCall = false;
10008    for (uint32_t i = 0; i < memRangeCount; ++i) {
10009        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10010        if (mem_element != my_data->memObjMap.end()) {
10011            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
10012                skipCall |= log_msg(
10013                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10014                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10015                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10016                    "(" PRINTF_SIZE_T_SPECIFIER ").",
10017                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
10018            }
10019            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
10020                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
10021                 (pMemRanges[i].offset + pMemRanges[i].size))) {
10022                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10023                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10024                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10025                                                                 ") exceeds the Memory Object's upper-bound "
10026                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
10027                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10028                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
10029            }
10030        }
10031    }
10032    return skipCall;
10033}
10034
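// Guard-band check for non-coherent mappings. The shadow allocation created at map
// time (see initializeAndTrackMemory) is expected to be laid out as 2 * size bytes
// pre-filled with NoncoherentMemoryFillValue, with the app's window at offset
// half_size. If any fill byte in the leading [0, half_size) or trailing
// [size + half_size, 2 * size) guard region changed, the app wrote outside its
// mapped range. The app's bytes are then copied through to the driver's actual
// mapping (pDriverData).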
10035static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10036                                                     const VkMappedMemoryRange *pMemRanges) {
10037    bool skipCall = false;
10038    for (uint32_t i = 0; i < memRangeCount; ++i) {
10039        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10040        if (mem_element != my_data->memObjMap.end()) {
10041            if (mem_element->second.pData) {
10042                VkDeviceSize size = mem_element->second.memRange.size;
10043                VkDeviceSize half_size = (size / 2);
10044                char *data = static_cast<char *>(mem_element->second.pData);
10045                for (VkDeviceSize j = 0; j < half_size; ++j) {
10046                    if (data[j] != NoncoherentMemoryFillValue) {
10047                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10048                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10049                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10050                                            (uint64_t)pMemRanges[i].memory);
10051                    }
10052                }
10053                for (auto j = size + half_size; j < 2 * size; ++j) {
10054                    if (data[j] != NoncoherentMemoryFillValue) {
10055                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10056                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10057                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10058                                            (uint64_t)pMemRanges[i].memory);
10059                    }
10060                }
10061                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
10062            }
10063        }
10064    }
10065    return skipCall;
10066}
10067
10068VK_LAYER_EXPORT VkResult VKAPI_CALL
10069vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10070    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10071    bool skipCall = false;
10072    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10073
10074    loader_platform_thread_lock_mutex(&globalLock);
10075    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
10076    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10077    loader_platform_thread_unlock_mutex(&globalLock);
10078    if (!skipCall) {
10079        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10080    }
10081    return result;
10082}
10083
10084VK_LAYER_EXPORT VkResult VKAPI_CALL
10085vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10086    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10087    bool skipCall = false;
10088    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10089
10090    loader_platform_thread_lock_mutex(&globalLock);
10091    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10092    loader_platform_thread_unlock_mutex(&globalLock);
10093    if (!skipCall) {
10094        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10095    }
10096    return result;
10097}
10098#endif
10099
10100VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10101    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10102    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10103    bool skipCall = false;
10104    loader_platform_thread_lock_mutex(&globalLock);
10105    auto image_node = dev_data->imageMap.find(image);
10106    if (image_node != dev_data->imageMap.end()) {
10107        // Track objects tied to memory
10108        uint64_t image_handle = reinterpret_cast<uint64_t&>(image);
10109        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10110        VkMemoryRequirements memRequirements;
10111        loader_platform_thread_unlock_mutex(&globalLock);
10112        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10113        loader_platform_thread_lock_mutex(&globalLock);
10114        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
10115                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
10116                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
10117        print_mem_list(dev_data);
10118        loader_platform_thread_unlock_mutex(&globalLock);
10119        if (!skipCall) {
10120            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
10121            loader_platform_thread_lock_mutex(&globalLock);
10122            dev_data->memObjMap[mem].image = image;
10123            image_node->second.mem = mem;
10124            image_node->second.memOffset = memoryOffset;
10125            image_node->second.memSize = memRequirements.size;
10126            loader_platform_thread_unlock_mutex(&globalLock);
10127        }
10128    } else {
10129        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10130                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
10131                "vkBindImageMemory: Cannot find image %" PRIx64 ". Has it already been destroyed?",
10132                reinterpret_cast<const uint64_t &>(image));
        // Release globalLock on this error path before returning
        loader_platform_thread_unlock_mutex(&globalLock);
10133    }
10134    return result;
10135}
10136
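// Host-side vkSetEvent: mark the event signaled with VK_PIPELINE_STAGE_HOST_BIT,
// reject the call if the event is still in use by an in-flight command buffer, and
// propagate the HOST stage to every queue that has already seen this event.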
10137VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
10138    bool skip_call = false;
10139    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10140    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10141    loader_platform_thread_lock_mutex(&globalLock);
10142    auto event_node = dev_data->eventMap.find(event);
10143    if (event_node != dev_data->eventMap.end()) {
10144        event_node->second.needsSignaled = false;
10145        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10146        if (event_node->second.in_use.load()) {
10147            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10148                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10149                                 "Cannot call vkSetEvent() on event %" PRIxLEAST64 " that is already in use by a command buffer.",
10150                                 reinterpret_cast<const uint64_t &>(event));
10151        }
10152    }
10153    loader_platform_thread_unlock_mutex(&globalLock);
10154    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
10155    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
10156    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10157    for (auto queue_data : dev_data->queueMap) {
10158        auto event_entry = queue_data.second.eventToStageMap.find(event);
10159        if (event_entry != queue_data.second.eventToStageMap.end()) {
10160            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10161        }
10162    }
10163    if (!skip_call)
10164        result = dev_data->device_dispatch_table->SetEvent(device, event);
10165    return result;
10166}
10167
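// vkQueueBindSparse: record sparse buffer/opaque-image/image bindings as memory
// bindings, and apply the usual forward-progress checks: the fence must not already
// be in use, waited semaphores must be signalable, and signaled semaphores must not
// already be signaled.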
10168VKAPI_ATTR VkResult VKAPI_CALL
10169vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10170    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10171    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10172    bool skip_call = false;
10173    loader_platform_thread_lock_mutex(&globalLock);
10174    // First verify that fence is not in use
10175    if ((fence != VK_NULL_HANDLE) && (bindInfoCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
10176        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
10177                             reinterpret_cast<uint64_t &>(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
10178                             "Fence %#" PRIx64 " is already in use by another submission.", reinterpret_cast<uint64_t &>(fence));
10179    }
10180    uint64_t fenceId = 0;
10181    skip_call |= add_fence_info(dev_data, fence, queue, &fenceId);
10182    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10183        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10184        // Track objects tied to memory
10185        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
10186            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
10187                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
10188                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10189                                           "vkQueueBindSparse"))
10190                    skip_call = true;
10191            }
10192        }
10193        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
10194            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
10195                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
10196                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10197                                           "vkQueueBindSparse"))
10198                    skip_call = true;
10199            }
10200        }
10201        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10202            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10203                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
10204                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10205                                           "vkQueueBindSparse"))
10206                    skip_call = true;
10207            }
10208        }
10209        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10210            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
10211            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10212                if (dev_data->semaphoreMap[semaphore].signaled) {
10213                    dev_data->semaphoreMap[semaphore].signaled = false;
10214                } else {
10215                    skip_call |=
10216                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10217                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10218                                "vkQueueBindSparse: Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64
10219                                " that has no way to be signaled.",
10220                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10221                }
10222            }
10223        }
10224        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10225            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
10226            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10227                if (dev_data->semaphoreMap[semaphore].signaled) {
10228                    skip_call |=
10229                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10230                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10231                                "vkQueueBindSparse: Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
10232                                ", but that semaphore is already signaled.",
10233                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10234                }
10235                dev_data->semaphoreMap[semaphore].signaled = true;
10236            }
10237        }
10238    }
10239    print_mem_list(dev_data);
10240    loader_platform_thread_unlock_mutex(&globalLock);
10241
10242    if (!skip_call)
10243        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10244
10245    return result;
10246}
10247
10248VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10249                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10250    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10251    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10252    if (result == VK_SUCCESS) {
10253        loader_platform_thread_lock_mutex(&globalLock);
10254        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10255        sNode->signaled = false;
10256        sNode->queue = VK_NULL_HANDLE;
10257        sNode->in_use.store(0);
10258        loader_platform_thread_unlock_mutex(&globalLock);
10259    }
10260    return result;
10261}
10262
10263VKAPI_ATTR VkResult VKAPI_CALL
10264vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10265    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10266    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10267    if (result == VK_SUCCESS) {
10268        loader_platform_thread_lock_mutex(&globalLock);
10269        dev_data->eventMap[*pEvent].needsSignaled = false;
10270        dev_data->eventMap[*pEvent].in_use.store(0);
10271        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10272        loader_platform_thread_unlock_mutex(&globalLock);
10273    }
10274    return result;
10275}
10276
10277VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10278                                                                    const VkAllocationCallbacks *pAllocator,
10279                                                                    VkSwapchainKHR *pSwapchain) {
10280    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10281    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10282
10283    if (VK_SUCCESS == result) {
10284        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
10285        loader_platform_thread_lock_mutex(&globalLock);
10286        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
10287        loader_platform_thread_unlock_mutex(&globalLock);
10288    }
10289
10290    return result;
10291}
10292
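// Destroying a swapchain retires its images: drop their layout and subresource
// tracking plus object bindings before freeing the SWAPCHAIN_NODE itself.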
10293VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10294vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10295    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10296    bool skipCall = false;
10297
10298    loader_platform_thread_lock_mutex(&globalLock);
10299    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
10300    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
10301        if (swapchain_data->second->images.size() > 0) {
10302            for (auto swapchain_image : swapchain_data->second->images) {
10303                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10304                if (image_sub != dev_data->imageSubresourceMap.end()) {
10305                    for (auto imgsubpair : image_sub->second) {
10306                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10307                        if (image_item != dev_data->imageLayoutMap.end()) {
10308                            dev_data->imageLayoutMap.erase(image_item);
10309                        }
10310                    }
10311                    dev_data->imageSubresourceMap.erase(image_sub);
10312                }
10313                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
10314                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10315                dev_data->imageMap.erase(swapchain_image);
10316            }
10317        }
10318        delete swapchain_data->second;
10319        dev_data->device_extensions.swapchainMap.erase(swapchain);
10320    }
10321    loader_platform_thread_unlock_mutex(&globalLock);
10322    if (!skipCall)
10323        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10324}
10325
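// When images are returned, register each swapchain image with the tracker: layout
// VK_IMAGE_LAYOUT_UNDEFINED and a synthetic binding to MEMTRACKER_SWAP_CHAIN_IMAGE_KEY.
// A warning is emitted if the data differs from what this swapchain returned before.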
10326VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10327vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10328    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10329    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10330
10331    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10332        // This should never happen and is checked by param checker.
10333        if (!pCount)
10334            return result;
10335        loader_platform_thread_lock_mutex(&globalLock);
10336        const size_t count = *pCount;
10337        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
10338        if (!swapchain_node->images.empty()) {
10339            // TODO : Not sure I like the memcmp here, but it works
10340            const bool mismatch = (swapchain_node->images.size() != count ||
10341                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10342            if (mismatch) {
10343                // TODO: Verify against Valid Usage section of extension
10344                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10345                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10346                        "vkGetSwapchainImagesKHR(): swapchain %" PRIu64
10347                        " returned mismatching image data across calls",
10348                        (uint64_t)(swapchain));
10349            }
10350        }
10351        for (uint32_t i = 0; i < *pCount; ++i) {
10352            IMAGE_LAYOUT_NODE image_layout_node;
10353            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10354            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10355            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
10356            image_node.createInfo.mipLevels = 1;
10357            image_node.createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10358            image_node.createInfo.usage = swapchain_node->createInfo.imageUsage;
10359            image_node.valid = false;
10360            image_node.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10361            swapchain_node->images.push_back(pSwapchainImages[i]);
10362            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10363            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10364            dev_data->imageLayoutMap[subpair] = image_layout_node;
10365            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10366        }
10367        loader_platform_thread_unlock_mutex(&globalLock);
10368    }
10369    return result;
10370}
10371
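// vkQueuePresentKHR: waited semaphores must be signalable, each presented image must
// have a valid memory binding, and every presented image must already be in
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.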
10372VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10373    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10374    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10375    bool skip_call = false;
10376
10377    if (pPresentInfo) {
10378        loader_platform_thread_lock_mutex(&globalLock);
10379        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10380            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
10381            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10382                if (dev_data->semaphoreMap[semaphore].signaled) {
10383                    dev_data->semaphoreMap[semaphore].signaled = false;
10384                } else {
10385                    skip_call |=
10386                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10387                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10388                                "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10389                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10390                }
10391            }
10392        }
10393        VkDeviceMemory mem;
10394        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10395            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
10396            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
10397                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
10398                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
10399#if MTMERGESOURCE
10400                skip_call |=
10401                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10402                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10403#endif
10404                vector<VkImageLayout> layouts;
10405                if (FindLayouts(dev_data, image, layouts)) {
10406                    for (auto layout : layouts) {
10407                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10408                            skip_call |=
10409                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10410                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10411                                        "Images passed to present must be in layout "
10412                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but are in %s",
10413                                        string_VkImageLayout(layout));
10414                        }
10415                    }
10416                }
10417            }
10418        }
10419        loader_platform_thread_unlock_mutex(&globalLock);
10420    }
10421
10422    if (!skip_call)
10423        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10424
10425    return result;
10426}
10427
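// vkAcquireNextImageKHR: the signal semaphore must not already be signaled; the
// fence, if any, is associated with this swapchain for later bookkeeping.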
10428VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10429                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10430    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10431    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10432    bool skipCall = false;
10433
10434    loader_platform_thread_lock_mutex(&globalLock);
10435    if (semaphore != VK_NULL_HANDLE &&
10436        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10437        if (dev_data->semaphoreMap[semaphore].signaled) {
10438            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10439                               reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10440                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10441        }
10442        dev_data->semaphoreMap[semaphore].signaled = true;
10443    }
10444    auto fence_data = dev_data->fenceMap.find(fence);
10445    if (fence_data != dev_data->fenceMap.end()) {
10446        fence_data->second.swapchain = swapchain;
10447    }
10448    loader_platform_thread_unlock_mutex(&globalLock);
10449
10450    if (!skipCall) {
10451        result =
10452            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10453    }
10454
10455    return result;
10456}
10457
10458VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10459vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10460                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10461    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10462    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10463    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10464    if (VK_SUCCESS == res) {
10465        loader_platform_thread_lock_mutex(&globalLock);
10466        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
10467        loader_platform_thread_unlock_mutex(&globalLock);
10468    }
10469    return res;
10470}
10471
10472VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
10473                                                                           VkDebugReportCallbackEXT msgCallback,
10474                                                                           const VkAllocationCallbacks *pAllocator) {
10475    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10476    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10477    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10478    loader_platform_thread_lock_mutex(&globalLock);
10479    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10480    loader_platform_thread_unlock_mutex(&globalLock);
10481}
10482
10483VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10484vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10485                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10486    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10487    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10488                                                            pMsg);
10489}
10490
10491VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10492    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10493        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10494    if (!strcmp(funcName, "vkDestroyDevice"))
10495        return (PFN_vkVoidFunction)vkDestroyDevice;
10496    if (!strcmp(funcName, "vkQueueSubmit"))
10497        return (PFN_vkVoidFunction)vkQueueSubmit;
10498    if (!strcmp(funcName, "vkWaitForFences"))
10499        return (PFN_vkVoidFunction)vkWaitForFences;
10500    if (!strcmp(funcName, "vkGetFenceStatus"))
10501        return (PFN_vkVoidFunction)vkGetFenceStatus;
10502    if (!strcmp(funcName, "vkQueueWaitIdle"))
10503        return (PFN_vkVoidFunction)vkQueueWaitIdle;
10504    if (!strcmp(funcName, "vkDeviceWaitIdle"))
10505        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
10506    if (!strcmp(funcName, "vkGetDeviceQueue"))
10507        return (PFN_vkVoidFunction)vkGetDeviceQueue;
10508    if (!strcmp(funcName, "vkDestroyInstance"))
10509        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkDestroyFence"))
        return (PFN_vkVoidFunction)vkDestroyFence;
    if (!strcmp(funcName, "vkResetFences"))
        return (PFN_vkVoidFunction)vkResetFences;
    if (!strcmp(funcName, "vkDestroySemaphore"))
        return (PFN_vkVoidFunction)vkDestroySemaphore;
    if (!strcmp(funcName, "vkDestroyEvent"))
        return (PFN_vkVoidFunction)vkDestroyEvent;
    if (!strcmp(funcName, "vkDestroyQueryPool"))
        return (PFN_vkVoidFunction)vkDestroyQueryPool;
    if (!strcmp(funcName, "vkDestroyBuffer"))
        return (PFN_vkVoidFunction)vkDestroyBuffer;
    if (!strcmp(funcName, "vkDestroyBufferView"))
        return (PFN_vkVoidFunction)vkDestroyBufferView;
    if (!strcmp(funcName, "vkDestroyImage"))
        return (PFN_vkVoidFunction)vkDestroyImage;
    if (!strcmp(funcName, "vkDestroyImageView"))
        return (PFN_vkVoidFunction)vkDestroyImageView;
    if (!strcmp(funcName, "vkDestroyShaderModule"))
        return (PFN_vkVoidFunction)vkDestroyShaderModule;
    if (!strcmp(funcName, "vkDestroyPipeline"))
        return (PFN_vkVoidFunction)vkDestroyPipeline;
    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
    if (!strcmp(funcName, "vkDestroySampler"))
        return (PFN_vkVoidFunction)vkDestroySampler;
    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
    if (!strcmp(funcName, "vkDestroyFramebuffer"))
        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
    if (!strcmp(funcName, "vkDestroyRenderPass"))
        return (PFN_vkVoidFunction)vkDestroyRenderPass;
    if (!strcmp(funcName, "vkCreateBuffer"))
        return (PFN_vkVoidFunction)vkCreateBuffer;
    if (!strcmp(funcName, "vkCreateBufferView"))
        return (PFN_vkVoidFunction)vkCreateBufferView;
    if (!strcmp(funcName, "vkCreateImage"))
        return (PFN_vkVoidFunction)vkCreateImage;
    if (!strcmp(funcName, "vkCreateImageView"))
        return (PFN_vkVoidFunction)vkCreateImageView;
    if (!strcmp(funcName, "vkCreateFence"))
        return (PFN_vkVoidFunction)vkCreateFence;
    if (!strcmp(funcName, "vkCreatePipelineCache"))
        return (PFN_vkVoidFunction)vkCreatePipelineCache;
    if (!strcmp(funcName, "vkDestroyPipelineCache"))
        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
    if (!strcmp(funcName, "vkGetPipelineCacheData"))
        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
    if (!strcmp(funcName, "vkMergePipelineCaches"))
        return (PFN_vkVoidFunction)vkMergePipelineCaches;
    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
    if (!strcmp(funcName, "vkCreateComputePipelines"))
        return (PFN_vkVoidFunction)vkCreateComputePipelines;
    if (!strcmp(funcName, "vkCreateSampler"))
        return (PFN_vkVoidFunction)vkCreateSampler;
    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
    if (!strcmp(funcName, "vkCreatePipelineLayout"))
        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
    if (!strcmp(funcName, "vkCreateDescriptorPool"))
        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
    if (!strcmp(funcName, "vkResetDescriptorPool"))
        return (PFN_vkVoidFunction)vkResetDescriptorPool;
    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
    if (!strcmp(funcName, "vkFreeDescriptorSets"))
        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
    if (!strcmp(funcName, "vkCreateCommandPool"))
        return (PFN_vkVoidFunction)vkCreateCommandPool;
    if (!strcmp(funcName, "vkDestroyCommandPool"))
        return (PFN_vkVoidFunction)vkDestroyCommandPool;
    if (!strcmp(funcName, "vkResetCommandPool"))
        return (PFN_vkVoidFunction)vkResetCommandPool;
    if (!strcmp(funcName, "vkCreateQueryPool"))
        return (PFN_vkVoidFunction)vkCreateQueryPool;
    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
    if (!strcmp(funcName, "vkFreeCommandBuffers"))
        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
    if (!strcmp(funcName, "vkBeginCommandBuffer"))
        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
    if (!strcmp(funcName, "vkEndCommandBuffer"))
        return (PFN_vkVoidFunction)vkEndCommandBuffer;
    if (!strcmp(funcName, "vkResetCommandBuffer"))
        return (PFN_vkVoidFunction)vkResetCommandBuffer;
    if (!strcmp(funcName, "vkCmdBindPipeline"))
        return (PFN_vkVoidFunction)vkCmdBindPipeline;
    if (!strcmp(funcName, "vkCmdSetViewport"))
        return (PFN_vkVoidFunction)vkCmdSetViewport;
    if (!strcmp(funcName, "vkCmdSetScissor"))
        return (PFN_vkVoidFunction)vkCmdSetScissor;
    if (!strcmp(funcName, "vkCmdSetLineWidth"))
        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
    if (!strcmp(funcName, "vkCmdSetDepthBias"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
    if (!strcmp(funcName, "vkCmdSetStencilReference"))
        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
    if (!strcmp(funcName, "vkCmdDraw"))
        return (PFN_vkVoidFunction)vkCmdDraw;
    if (!strcmp(funcName, "vkCmdDrawIndexed"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
    if (!strcmp(funcName, "vkCmdDrawIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
    if (!strcmp(funcName, "vkCmdDispatch"))
        return (PFN_vkVoidFunction)vkCmdDispatch;
    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
    if (!strcmp(funcName, "vkCmdCopyBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
    if (!strcmp(funcName, "vkCmdCopyImage"))
        return (PFN_vkVoidFunction)vkCmdCopyImage;
    if (!strcmp(funcName, "vkCmdBlitImage"))
        return (PFN_vkVoidFunction)vkCmdBlitImage;
    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
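    // Memory-tracking entry points merged in from mem_tracker (see MTMERGESOURCE).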
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;

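    // Everything below requires a valid device in order to look up layer state.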
    if (dev == NULL)
        return NULL;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

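    // WSI (swapchain) entry points are intercepted only when the extension was enabled at device creation.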
    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

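    // Not an intercepted entry point; pass the query down the chain.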
    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

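// Map instance-level entry point names to this layer's intercept routines, falling
// back to the debug report helpers and then to the next layer in the chain.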
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;

    if (instance == NULL)
        return NULL;

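    // Debug report extension entry points are resolved by the layer logging framework.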
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    PFN_vkVoidFunction fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}
