core_validation.cpp revision fce842878e9ddcc7f37e1c457a4b018d52358087
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, DescriptorSetLayout> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {}
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}
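
// Illustrative only (a sketch, not part of the layer): an enabled-layer
// ordering that satisfies the check above, with core_validation activated
// before VK_LAYER_GOOGLE_unique_objects. 'instance_ci' is a placeholder
// VkInstanceCreateInfo.
//
//     const char *layer_names[] = {
//         "VK_LAYER_LUNARG_core_validation",
//         "VK_LAYER_GOOGLE_unique_objects",
//     };
//     instance_ci.enabledLayerCount = 2;
//     instance_ci.ppEnabledLayerNames = layer_names;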

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
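
/* Illustrative only: the iterator above supports range-based for over a
 * shader_module, one SPIR-V instruction per step ('module' is an assumed
 * shader_module pointer):
 *
 *     for (auto insn : *module)
 *         LOGCONSOLE("offset %u: opcode %u, %u words\n",
 *                    insn.offset(), insn.opcode(), insn.len());
 */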

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
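
/* Illustrative only: def_index lets type walks jump straight to an id's
 * definition rather than rescanning the whole stream. A sketch, with
 * 'module' and 'vec_type_id' assumed:
 *
 *     auto insn = module->get_def(vec_type_id);
 *     if (insn != module->end() && insn.opcode() == spv::OpTypeVector) {
 *         unsigned component_type_id = insn.word(2);
 *         unsigned component_count = insn.word(3);
 *     }
 */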

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageMap.find(VkImage(handle));
        if (it != my_data->imageMap.end())
            return &(*it).second.mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferMap.find(VkBuffer(handle));
        if (it != my_data->bufferMap.end())
            return &(*it).second.mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        skipCall = validate_usage_flags(dev_data, image_node->second.createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const buffer_node = dev_data->bufferMap.find(buffer);
    if (buffer_node != dev_data->bufferMap.end()) {
        skipCall = validate_usage_flags(dev_data, buffer_node->second.createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}
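
/* Illustrative call (a sketch of how the wrapper above is meant to be used;
 * the argument values are examples, not taken from this file):
 *
 *     skipCall |= validate_buffer_usage_flags(dev_data, src_buffer,
 *                                             VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
 *                                             "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
 */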

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end() && !image_node->second.valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using it.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using it.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end()) {
            image_node->second.valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if (cmdBufRefCount != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, *pMemBinding);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    pMemInfo->objBindings.insert({handle, type});
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        auto const image_node = dev_data->imageMap.find(VkImage(handle));
                        if (image_node != dev_data->imageMap.end()) {
                            VkImageCreateInfo ici = image_node->second.createInfo;
                            if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                                // TODO::  More memory state transition stuff.
                            }
                        }
                    }
                    *pMemBinding = mem;
                }
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if an error was logged and the call should be skipped, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list", apiName, handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
            if (pInfo) {
                pInfo->objBindings.insert({handle, type});
                // Need to set mem binding for this object
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->objBindings.size());
        if (pInfo->objBindings.size() > 0) {
            for (auto obj : pInfo->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->commandBufferBindings.size());
        if (pInfo->commandBufferBindings.size() > 0) {
            for (auto cb : pInfo->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)obj);
        }
    }
}

#endif

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
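
/* Note on the word positions used above: OpType* instructions carry their
 * result id in word 1, while constants, spec constants, variables, and
 * functions carry a result *type* in word 1 and their result id in word 2. */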

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t *words = (uint32_t *)pCreateInfo->pCode;
    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);

    /* Just validate that the header makes sense. */
    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
}
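
/* For reference, the five SPIR-V header words checked above are, in order:
 * magic number (0x07230203), version, generator magic, id bound, and a
 * reserved word that must be zero. Only the first two are validated here. */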

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
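
/* Illustrative output (a sketch): for a fragment shader input declared as
 * layout(location = 0) in vec4 color; the variable's pointer type describes
 * as "ptr to input vec4 of float32". */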


static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}


static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
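
/* Illustrative consequence of the relaxed rules above (a sketch): a vec4
 * written by one stage can satisfy a consumer that declares vec2, or even a
 * lone float, since the consumer may read fewer components; a consumer that
 * is wider than the producer still fails to match. */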

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
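
/* Worked examples for the function above: a dvec3 (three 64-bit components)
 * needs (64 * 3 + 127) / 128 = 2 locations; a mat4 of 32-bit floats needs
 * 4 columns * 1 location = 4 locations. */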

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}

static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
                                          bool is_array_of_verts) {
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;

    for (auto insn : *src) {
        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* Find the end of the entrypoint's name string. Additional zero bytes follow the actual null
       terminator, to fill out the rest of the word -- so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;
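
    /* Example: for OpEntryPoint Vertex %4 "main" %in %out, the string "main"
     * plus its null terminator spans words 3-4; the top byte of word 4 is zero,
     * so the loop above stops there and the interface <id>s begin at word 5. */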
    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
            bool is_patch = var_patch.find(id) != var_patch.end();

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupies multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = false;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }
}

static void collect_interface_by_descriptor_slot(layer_data *my_data, shader_module const *src,
                                                 std::unordered_set<uint32_t> const &accessible_ids,
                                                 std::map<descriptor_slot_t, interface_var> &out) {
    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
         * DecorationDescriptorSet and DecorationBinding.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            auto existing_it = out.find(std::make_pair(set, binding));
            if (existing_it != out.end()) {
                /* conflict within the SPIR-V module */
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
                        existing_it->first.second);
            }

            interface_var v;
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            v.offset = 0;
            v.is_patch = false;
            v.is_block_member = false;
            out[std::make_pair(set, binding)] = v;
        }
    }
}

static bool validate_interface_between_stages(layer_data *my_data, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              shader_stage_attributes const *consumer_stage) {
    std::map<location_t, interface_var> outputs;
    std::map<location_t, interface_var> inputs;

    bool pass = true;

    collect_interface_by_location(my_data, producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
    collect_interface_by_location(my_data, consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
                        a_first.second, consumer_stage->name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first,
                        b_first.second, producer_stage->name)) {
                pass = false;
            }
            b_it++;
        } else {
            // subtleties of arrayed interfaces:
            // - if is_patch, then the member is not arrayed, even though the interface may be.
            // - if is_block_member, then the extra array level of an arrayed interface is not
            //   expressed in the member type -- it's expressed in the block type.
            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
                             true)) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            if (a_it->second.is_patch != b_it->second.is_patch) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
                            "per-%s in %s stage", a_first.first, a_first.second,
                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}
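/* Note: validate_interface_between_stages is intended to be invoked once for
 * each consecutive (producer, consumer) pair of active shader stages in a
 * pipeline, e.g. vertex -> fragment. */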

enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64G64_SINT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_R64_UINT:
    case VK_FORMAT_R64G64_UINT:
    case VK_FORMAT_R64G64B64_UINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}

/* Characterizes a SPIR-V type appearing in an interface to a fixed-function
 * stage, for comparison to a VkFormat's characterization above. */
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeInt:
        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
    case spv::OpTypeFloat:
        return FORMAT_TYPE_FLOAT;
    case spv::OpTypeVector:
    case spv::OpTypeMatrix:
    case spv::OpTypeArray:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypePointer:
        return get_fundamental_type(src, insn.word(3));
    default:
        return FORMAT_TYPE_UNDEFINED;
    }
}
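
// Map a single-bit VkShaderStageFlagBits value to a dense 0-based index;
// u_ffs() returns the 1-based position of the lowest set bit (ffs() semantics).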
static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}

static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) {
    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
     * each binding should be specified only once.
     */
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}

static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    std::map<location_t, interface_var> inputs;
    bool pass = true;

    collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false);

    /* Build index by location */
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
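    /* An attribute whose format consumes multiple locations (one of the 64-bit
     * three- or four-component formats) is indexed under every location it
     * occupies. */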
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
            for (auto j = 0u; j < num_locations; j++) {
                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
            }
        }
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "Vertex attribute at location %d not consumed by VS", a_first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "VS consumes input at location %d which is not provided", b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            /* type checking */
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first,
                            describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it_a++;
            it_b++;
        }
    }

    return pass;
}

static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
    std::map<location_t, interface_var> outputs;
    bool pass = true;

    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */

    collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false);

    auto it = outputs.begin();
    uint32_t attachment = 0;

    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
     * are currently dense, but the parallel with matching between shader stages is nice.
     */

    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "FS writes to output location %d with no matching attachment", it->first.first)) {
                pass = false;
            }
            it++;
        } else if (it == outputs.end() || it->first.first > attachment) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
                pass = false;
            }
            attachment++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
            unsigned att_type = get_format_type(color_formats[attachment]);

            /* type checking */
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
                            string_VkFormat(color_formats[attachment]),
                            describe_type(fs, it->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it++;
            attachment++;
        }
    }

    return pass;
}

/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
 * for example.
 * Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
 *  - NOT the shader input/output interfaces.
 *
 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
 * converting parts of this to be generated from the machine-readable spec instead.
 */
static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
    std::unordered_set<uint32_t> worklist;
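    /* Word 2 of OpEntryPoint is the <id> of the entry function. */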
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
             * across all kinds of things here that we may not care about. */
            continue;
        }

        /* try to add to the output set */
        if (!ids.insert(id).second) {
            continue; /* if we already saw this id, we don't want to walk it again. */
        }

        switch (insn.opcode()) {
        case spv::OpFunction:
            /* scan whole body of the function, enlisting anything interesting */
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); /* ptr */
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); /* ptr */
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); /* base ptr */
                    break;
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); /* image or sampled image */
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
                    break;
                case spv::OpFunctionCall:
                    for (uint32_t i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* fn itself, and all args */
                    }
                    break;

                case spv::OpExtInst:
                    for (uint32_t i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* operands to ext inst */
                    }
                    break;
                }
            }
            break;
        }
    }
}

static bool validate_push_constant_block_against_pipeline(layer_data *my_data,
                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    /* strip off ptrs etc */
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    /* validate directly off the offsets. this isn't quite correct for arrays
     * and matrices, but is a good first step. TODO: arrays, matrices, weird
     * sizes */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; /* bytes; TODO: calculate this based on the type */

                bool found_range = false;
                for (auto const &range : *pushConstantRanges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}

static bool validate_push_constant_usage(layer_data *my_data,
                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src,
                                                                  src->get_def(def_insn.word(1)), stage);
        }
    }

    return pass;
}

// For a given pipelineLayout, verify that the set_layout_node at slot.first
//  has the requested binding at slot.second and return a ptr to that binding
static VkDescriptorSetLayoutBinding const *get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout,
                                                                  descriptor_slot_t slot) {
    if (!pipelineLayout)
        return nullptr;

    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
        return nullptr;

    auto &layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]];

    return layout_node.GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
}

// Block of code at start here for managing/tracking Pipeline state that this layer cares about

static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};

// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
//   to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread

static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
        if (pCB->drawCount[i])
            return true;
    }
    return false;
}

// Check object status for selected flag state
static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
    if (!(pNode->status & status_mask)) {
        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
                       "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
    auto it = my_data->pipelineMap.find(pipeline);
    if (it == my_data->pipelineMap.end()) {
        return NULL;
    }
    return it->second;
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
                return true;
        }
    }
    return false;
}

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
    bool result;
    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
                             "Dynamic viewport state not set for this command buffer");
    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
                              "Dynamic scissor state not set for this command buffer");
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
    }
    if (indexedDraw) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
    }
    return result;
}

// Verify attachment reference compatibility according to spec.
//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this.
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
//   to make sure that formats and sample counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
            return true;
    } else { // Format and sample count must match
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // The references are not compatible
    return false;
}

// For given primary and secondary RenderPass objects, verify that they're compatible
static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
                                            string &errorMsg) {
    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
        stringstream errorStr;
        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
        errorMsg = errorStr.str();
        return false;
    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
        stringstream errorStr;
        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
        errorMsg = errorStr.str();
        return false;
    }
    // Trivial pass case is exact same RP
    if (primaryRP == secondaryRP) {
        return true;
    }
    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// For a given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
                                            const uint32_t layoutIndex, string &errorMsg) {
    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
        stringstream errorStr;
        errorStr << "invalid VkPipelineLayout (" << layout << ")";
        errorMsg = errorStr.str();
        return false;
    }
    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
                 << ", but you're attempting to bind set to index " << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
    return layout_node.IsCompatible(pSet->p_layout, &errorMsg);
}

// Validate that data for each specialization entry is fully contained within the buffer.
static bool validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
    bool pass = true;

    VkSpecializationInfo const *spec = info->pSpecializationInfo;

    if (spec) {
        for (auto i = 0u; i < spec->mapEntryCount; i++) {
            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
                            "Specialization entry %u (for constant id %u) references memory outside provided "
                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
                            " bytes provided)",
                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
                    pass = false;
                }
            }
        }
    }

    return pass;
}
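// E.g. a map entry with offset == 8 and size == 4 requires dataSize >= 12 to
// pass the check above.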

static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    /* Strip off any array or ptrs. Where we remove array levels, adjust the
     * descriptor count for each dimension. */
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        } else {
            type = module->get_def(type.word(3));
        }
    }
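
    /* E.g. a 'uniform sampler2D tex[4];' binding reaches this point with
     * descriptor_count == 4 and type referring to the underlying
     * OpTypeSampledImage. */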
    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        /* Invalid */
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;

    case spv::OpTypeSampledImage:
        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
            /* Slight relaxation for some GLSL historical madness: samplerBuffer
             * doesn't really have a sampler, and a texel buffer descriptor
             * doesn't really provide one. Allow this slight mismatch.
             */
            auto image_type = module->get_def(type.word(2));
            auto dim = image_type.word(3);
            auto sampled = image_type.word(7);
            return dim == spv::DimBuffer && sampled == 1;
        }
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        /* Many descriptor types can back image types -- it depends on dimension
         * and whether the image will be used with a sampler. SPIR-V for
         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
         * runtime is unacceptable.
         */
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    /* We shouldn't really see any other junk types -- but if we do, they're
     * a mismatch.
     */
    default:
        return false; /* Mismatch */
    }
}

static bool require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
                    "enabled on the device",
                    feature_name)) {
            return false;
        }
    }

    return true;
}

static bool validate_shader_capabilities(layer_data *my_data, shader_module const *src) {
    bool pass = true;

    auto enabledFeatures = &my_data->phys_dev_properties.features;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, not supported in Vulkan.",
                            insn.word(1)))
                    pass = false;
                break;
            }
        }
    }

    return pass;
}
2417
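// Validate a single pipeline shader stage: locate its entrypoint, check the module's declared
// capabilities against the enabled device features, and verify its push constant and descriptor
// usage against the given pipeline layout, capturing used slots into pipeline->active_slots.
// Note: assumes pStage->module is already tracked in shaderModuleMap and pipelineLayout is non-null.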
static bool validate_pipeline_shader_stage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage,
                                           PIPELINE_NODE *pipeline, PIPELINE_LAYOUT_NODE *pipelineLayout,
                                           shader_module **out_module, spirv_inst_iter *out_entrypoint) {
    bool pass = true;
    auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get();
    pass &= validate_specialization_offsets(dev_data, pStage);

    /* find the entrypoint */
    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
    if (entrypoint == module->end()) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
                    "No entrypoint found named `%s` for stage %s", pStage->pName,
                    string_VkShaderStageFlagBits(pStage->stage))) {
            pass = false;
        }
    }

    /* validate shader capabilities against enabled device features */
    pass &= validate_shader_capabilities(dev_data, module);

    /* mark accessible ids */
    std::unordered_set<uint32_t> accessible_ids;
    mark_accessible_ids(module, entrypoint, accessible_ids);

    /* validate descriptor set layout against what the entrypoint actually uses */
    std::map<descriptor_slot_t, interface_var> descriptor_uses;
    collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses);

    /* validate push constant usage */
    pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges,
                                         module, accessible_ids, pStage->stage);

    /* validate descriptor use */
    for (auto use : descriptor_uses) {
        // While validating shaders capture which slots are used by the pipeline
        pipeline->active_slots[use.first.first].insert(use.first.second);

        /* verify given pipelineLayout has requested setLayout with requested binding */
        auto binding = get_descriptor_binding(dev_data, pipelineLayout, use.first);
        unsigned required_descriptor_count;

        if (!binding) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but descriptor not accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType,
                                          /*out*/ required_descriptor_count)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Type mismatch on descriptor slot %u.%u (used as type `%s`) but descriptor is of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    return pass;
}


// Validate the shaders used by the given pipeline, and capture the slots
//  that are actually used by the pipeline into pPipeline->active_slots
static bool validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) {
    auto pCreateInfo = reinterpret_cast<VkGraphicsPipelineCreateInfo const *>(&pPipeline->graphicsPipelineCI);
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
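    // One slot per graphics stage (vertex, tess control, tess eval, geometry, fragment),
    // indexed by get_shader_stage_id()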
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    bool pass = true;

    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        VkPipelineShaderStageCreateInfo const *pStage =
            reinterpret_cast<VkPipelineShaderStageCreateInfo const *>(&pCreateInfo->pStages[i]);
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout,
                                               &shaders[stage_id], &entrypoints[stage_id]);
    }

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(my_data, vi);
    }

    if (shaders[vertex_stage]) {
        pass &= validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }

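    // Starting at the vertex stage, match each present stage's outputs against the inputs of the
    // next present stage; stages absent from the pipeline are skipped over.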
    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass &= validate_interface_between_stages(my_data,
                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);

            producer = consumer;
        }
    }

    auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr;

    if (shaders[fragment_stage] && rp) {
        pass &= validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp,
                                                        pCreateInfo->subpass);
    }

    return pass;
}

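// Validate the compute pipeline's single shader stage against its pipeline layout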
static bool validate_compute_pipeline(layer_data *my_data, PIPELINE_NODE *pPipeline) {
    auto pCreateInfo = reinterpret_cast<VkComputePipelineCreateInfo const *>(&pPipeline->computePipelineCI);

    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;

    shader_module *module;
    spirv_inst_iter entrypoint;

    return validate_pipeline_shader_stage(my_data, &pCreateInfo->stage, pPipeline, pipelineLayout,
                                          &module, &entrypoint);
}

// Return Set node ptr for specified set or else NULL
static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
    auto set_it = my_data->setMap.find(set);
    if (set_it == my_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}
// For the given command buffer, verify and update the state for activeSetBindingsPairs
//  This includes:
//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
//     To be valid, the dynamic offset combined with the offset and range from its
//     descriptor update must not overflow the size of its buffer being updated
//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
static bool validate_and_update_drawtime_descriptor_state(
    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
    const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
    bool result = false;

    VkWriteDescriptorSet *pWDS = NULL;
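    // dynOffsetIndex walks the dynamic offsets recorded at bind time, in binding order.
    // Note: only the graphics bind point's offsets are consulted below.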
    uint32_t dynOffsetIndex = 0;
    VkDeviceSize bufferSize = 0;
    for (auto set_bindings_pair : activeSetBindingsPairs) {
        SET_NODE *set_node = set_bindings_pair.first;
        auto layout_node = set_node->p_layout;
        for (auto binding : set_bindings_pair.second) {
            if ((set_node->p_layout->GetTypeFromBinding(binding) == VK_DESCRIPTOR_TYPE_SAMPLER) &&
                (set_node->p_layout->GetDescriptorCountFromBinding(binding) != 0) &&
                (set_node->p_layout->GetImmutableSamplerPtrFromBinding(binding))) {
                // No work for immutable sampler binding
            } else {
                uint32_t startIdx = layout_node->GetGlobalStartIndexFromBinding(binding);
                uint32_t endIdx = layout_node->GetGlobalEndIndexFromBinding(binding);
                for (uint32_t i = startIdx; i <= endIdx; ++i) {
                    // We did check earlier to verify that set was updated, but now make sure given slot was updated
                    // TODO : Would be better to store set# that set is bound to so we can report set.binding[index] not updated
                    // For immutable sampler w/o combined image, don't need to update
                    if (!set_node->pDescriptorUpdates[i]) {
                        result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                            DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                            "DS %#" PRIxLEAST64 " bound and active but it never had binding %u updated. It is now being used to draw so "
                                                                "this will result in undefined behavior.",
                                            reinterpret_cast<const uint64_t &>(set_node->set), binding);
                    } else {
                        switch (set_node->pDescriptorUpdates[i]->sType) {
                        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
                            pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];

                            // Verify uniform and storage buffers actually are bound to valid memory at draw time.
                            if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
                                (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                                (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
                                (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                                for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                                    auto buffer_node = dev_data->bufferMap.find(pWDS->pBufferInfo[j].buffer);
                                    if (buffer_node == dev_data->bufferMap.end()) {
                                        result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                          reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                          DRAWSTATE_INVALID_BUFFER, "DS",
                                                          "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index #%u"
                                                          " is not defined! Has vkCreateBuffer been called?",
                                                          reinterpret_cast<const uint64_t &>(set_node->set),
                                                          string_VkDescriptorType(pWDS->descriptorType),
                                                          reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i);
                                    } else {
                                        auto mem_entry = dev_data->memObjMap.find(buffer_node->second.mem);
                                        if (mem_entry == dev_data->memObjMap.end()) {
                                            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                              reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                              DRAWSTATE_INVALID_BUFFER, "DS",
                                                              "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index"
                                                              " #%u has no memory bound to it!",
                                                              reinterpret_cast<const uint64_t &>(set_node->set),
                                                              string_VkDescriptorType(pWDS->descriptorType),
                                                              reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i);
                                        }
                                    }
                                    // If it's a dynamic buffer, make sure the offsets are within the buffer.
                                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                                        bufferSize = dev_data->bufferMap[pWDS->pBufferInfo[j].buffer].createInfo.size;
                                        uint32_t dynOffset =
                                            pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
                                        if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
                                            if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
                                                result |= log_msg(
                                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                    DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
                                                    "VkDescriptorSet (%#" PRIxLEAST64 ") at descriptor index #%u has a range of "
                                                    "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32 " "
                                                    "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
                                                    ") which has a size of %#" PRIxLEAST64 ".",
                                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                                    pWDS->pBufferInfo[j].offset,
                                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                                            }
                                        } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) >
                                                   bufferSize) {
                                            result |=
                                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                        reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                        DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
                                                        "VkDescriptorSet (%#" PRIxLEAST64
                                                        ") at descriptor index #%u has dynamic offset %#" PRIxLEAST32 ". "
                                                        "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
                                                        " from its update, this oversteps its buffer "
                                                        "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
                                                        reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                                        pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
                                                        reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                                        }
                                        dynOffsetIndex++;
                                    }
                                }
                            }
                            if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
                                for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                                    pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
                                }
                            } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
                                for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                                    assert(dev_data->bufferViewMap.find(pWDS->pTexelBufferView[j]) != dev_data->bufferViewMap.end());
                                    pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
                                }
                            } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                                       pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                                    pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
                                }
                            }
                            i += pWDS->descriptorCount; // Advance i to end of this set of descriptors (++i at end of for loop will move 1
                                                        // index past last of these descriptors)
                            break;
                        default: // Currently only shadowing Write update nodes so shouldn't get here
                            assert(0);
                            continue;
                        }
                    }
                }
            }
        }
    }
    return result;
}
// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound.
//   When validate_and_update_draw_state() handles compute shaders so that active_slots is correct for compute pipelines, this
//   function can be killed and validate_and_update_draw_state() used instead
static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    VkWriteDescriptorSet *pWDS = nullptr;
    SET_NODE *pSet = nullptr;
    // For the bound descriptor sets, pull off any storage images and buffers
    //  This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute
    //  pipelines
    for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) {
        // Get the set node
        pSet = getSetNode(dev_data, set);
        if (!pSet) // Defensively skip sets with no tracking state rather than dereferencing null below
            continue;
        // For each update in the set
        for (auto pUpdate : pSet->pDescriptorUpdates) {
            // If it's a write update to STORAGE type capture image/buffer being updated
            if (pUpdate && (pUpdate->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)) {
                pWDS = reinterpret_cast<VkWriteDescriptorSet *>(pUpdate);
                if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                        pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
                    }
                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                        pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
                    }
                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                        pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
                    }
                }
            }
        }
    }
}

// Validate overall state at the time of a draw call
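//  This includes draw-state flags, bound descriptor set compatibility and update status,
//  vertex buffer bindings, and dynamic viewport/scissor counts vs. the bound PSO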
static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
                                           const VkPipelineBindPoint bindPoint) {
    bool result = false;
    auto const &state = pCB->lastBound[bindPoint];
    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);

    // Now complete other state checks
    // TODO : Currently only performing next check if *something* was bound (non-zero last bound)
    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound
    //  We should have that check separately and then gate this check based on that check
    if (pPipe) {
        if (state.pipelineLayout) {
            string errorString;
            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
            vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
            for (auto setBindingPair : pPipe->active_slots) {
                uint32_t setIndex = setBindingPair.first;
                // If valid set is not bound throw an error
                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
                                      (uint64_t)pPipe->pipeline, setIndex);
                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
                    result |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                        "VkDescriptorSet (%#" PRIxLEAST64
                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
                } else { // Valid set is bound and layout compatible, validate that it's updated
                    // Pull the set node
                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
                    // Save vector of all active sets to verify dynamicOffsets below
                    activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second));
                    // Make sure set has been updated if it has no immutable samplers
                    //  If it has immutable samplers, we'll flag error later as needed depending on binding
                    if (!pSet->pUpdateStructs) {
                        for (auto binding : setBindingPair.second) {
                            if (!pSet->p_layout->GetImmutableSamplerPtrFromBinding(binding)) {
                                result |=
                                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
                                            DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                            "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
                                            "this will result in undefined behavior.",
                                            (uint64_t)pSet->set);
                            }
                        }
                    }
                }
            }
            // For given active slots, verify any dynamic descriptors and record updated images & buffers
            result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
        }
        if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {
            // Verify Vtx binding
            if (pPipe->vertexBindingDescriptions.size() > 0) {
                for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
                    if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                          __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                          "The Pipeline State Object (%#" PRIxLEAST64
                                          ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
                                          " should be set via vkCmdBindVertexBuffers.",
                                          (uint64_t)state.pipeline, i);
                    }
                }
            } else {
                if (!pCB->currentDrawData.buffers.empty()) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                      "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
                                      ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
                                      (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
                }
            }
            // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
            // Skip check if rasterization is disabled or there is no viewport.
            if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
                 (pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
                pPipe->graphicsPipelineCI.pViewportState) {
                bool dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
                bool dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
                if (dynViewport) {
                    if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                          "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
                                          ", but PSO viewportCount is %u. These counts must match.",
                                          pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
                    }
                }
                if (dynScissor) {
                    if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                          "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
                                          ", but PSO scissorCount is %u. These counts must match.",
                                          pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
                    }
                }
            }
        }
    }
    return result;
}

// Validate HW line width capabilities prior to setting requested line width.
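//   'target' is the object handle the error is reported against (e.g., the pipeline being validated).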
static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
    bool skip_call = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
                                            "not supported/enabled so lineWidth must be 1.0f!",
                             lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
                                                          "to between [%f, %f]!",
                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip_call;
}

// Verify that create state for a pipeline is valid
static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
                                      int pipelineIndex) {
    bool skipCall = false;

    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_NODE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

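    // If color blend state is present, validate independentBlend and logicOp feature usage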
    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->phys_dev_properties.features.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
                        skipCall |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
                                                                       "enabled, all elements of pAttachments must be identical");
                    }
                }
            }
        }
        if (!my_data->phys_dev_properties.features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
        }
        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
        }
    }

    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
    if (rp_data != my_data->renderPassMap.end() &&
        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                                           "is out of range for this renderpass (0..%u)",
                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) {
        skipCall = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skipCall |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
    }
    // Either both or neither TC/TE shaders should be defined
    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
    }
    // Compute shaders should be specified independent of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                           "topology for tessellation pipelines");
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo State: "
                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                "topology used. pTessellationState must not be NULL in this case.");
        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology used with patchControlPoints value %u."
                                                                               " patchControlPoints should be >0 and <=32.",
                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
        }
    }
    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                           "and scissors are dynamic PSO must include "
                                                                           "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                        "vkCmdSetViewport().",
                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                        "vkCmdSetScissor().",
                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return skipCall;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
    if (pPipe->graphicsPipelineCI.pMultisampleState &&
        (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->graphicsPipelineCI.pMultisampleState->sType)) {
        return pPipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

// Validate state related to the PSO
static bool validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
                                  const VkPipeline pipeline) {
    bool skipCall = false;
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
        // Verify that any MSAA request in PSO matches sample# in bound FB
        // Skip the check if rasterization is disabled.
        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
            (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
            if (pCB->activeRenderPass) {
                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
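                // Accumulate the subpass sample count: 0 = no attachments seen yet,
                // (VkSampleCountFlagBits)-1 = attachments disagree on sample count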
3149                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3150                uint32_t i;
3151
3152                const VkPipelineColorBlendStateCreateInfo *pColorBlendState = pPipeline->graphicsPipelineCI.pColorBlendState;
3153                if ((pColorBlendState != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
3154                    (pColorBlendState->attachmentCount != pSD->colorAttachmentCount)) {
3155                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3156                                   reinterpret_cast<const uint64_t &>(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3157                                   "Render pass subpass %u mismatch with blending state defined  and blend state attachment "
3158                                   "count %u but subpass color attachment count %u!  These must be the same.",
3159                                   pCB->activeSubpass, pColorBlendState->attachmentCount, pSD->colorAttachmentCount);
3160                }
3161
3162                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3163                    VkSampleCountFlagBits samples;
3164
3165                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3166                        continue;
3167
3168                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3169                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3170                        subpassNumSamples = samples;
3171                    } else if (subpassNumSamples != samples) {
3172                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3173                        break;
3174                    }
3175                }
3176                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3177                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3178                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3179                        subpassNumSamples = samples;
3180                    else if (subpassNumSamples != samples)
3181                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3182                }
3183
3184                if ((pSD->colorAttachmentCount > 0 || pSD->pDepthStencilAttachment) &&
3185                    psoNumSamples != subpassNumSamples) {
3186                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3187                                        (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3188                                        "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
3189                                        ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
3190                                        (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3191                }
3192            } else {
3193                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3194                //   Verify and flag error as appropriate
3195            }
3196        }
3197        // TODO : Add more checks here
3198    } else {
3199        // TODO : Validate non-gfx pipeline updates
3200    }
3201    return skipCall;
3202}
3203
3204// The following block of code is dedicated to managing/tracking descriptor sets (DSs)
3205
3206// Return Pool node ptr for specified pool or else NULL
3207static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3208    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3209        return NULL;
3210    }
3211    return my_data->descriptorPoolMap[pool];
3212}
3213
3214// Return false if update struct is of a valid type; otherwise flag an error and return the callback's skip result
3215static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3216    switch (pUpdateStruct->sType) {
3217    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3218    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3219        return false;
3220    default:
3221        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3222                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3223                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3224                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3225    }
3226}
3227
3228// Return the descriptor count for the given update struct
3229static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3230    switch (pUpdateStruct->sType) {
3231    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3232        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3233    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3234        // TODO : Need to understand this case better and make sure code is correct
3235        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3236    default:
3237        return 0;
3238    }
3239}
3240
3241// For given layout and update, return the first overall index of the layout that is updated
3242static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3243                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3244    return binding_start_index + arrayIndex;
3245}
3246// For given layout and update, return the last overall index of the layout that is updated
3247static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3248                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3249    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3250    return binding_start_index + arrayIndex + count - 1;
3251}
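    // Worked example (illustrative): for a binding whose global start index is 4, a write
    // update with dstArrayElement 2 and descriptorCount 3 spans overall indices 6..8:
    //   start = 4 + 2 = 6;  end = 4 + 2 + 3 - 1 = 8.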
3252// Verify that the descriptor type in the update struct matches what's expected by the layout
3253static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3254                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3255    // First get actual type of update
3256    bool skipCall = false;
3257    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3258    switch (pUpdateStruct->sType) {
3259    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3260        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3261        break;
3262    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3263        /* no need to validate */
3264        return false;
3266    default:
3267        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3268                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3269                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3270                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3271    }
3272    if (!skipCall) {
3273        if (layout_type != actualType) {
3274            skipCall |= log_msg(
3275                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3276                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3277                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3278                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3279        }
3280    }
3281    return skipCall;
3282}
3283
3284// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3285//   struct into the pNewNode param. Return true if an error condition is encountered and the callback signals early exit.
3286// NOTE : Calls to this function should be wrapped in mutex
3287static bool shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3288    bool skipCall = false;
3289    VkWriteDescriptorSet *pWDS = NULL;
3290    VkCopyDescriptorSet *pCDS = NULL;
3291    switch (pUpdate->sType) {
3292    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3293        pWDS = new VkWriteDescriptorSet;
3294        *pNewNode = (GENERIC_HEADER *)pWDS;
3295        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3296
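            // The memcpy above is only a shallow copy; the switch below deep-copies the
            // type-specific info array so the shadow node owns its data independently.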
3297        switch (pWDS->descriptorType) {
3298        case VK_DESCRIPTOR_TYPE_SAMPLER:
3299        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3300        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3301        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3302            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3303            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3304            pWDS->pImageInfo = info;
3305        } break;
3306        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3307        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3308            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3309            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3310            pWDS->pTexelBufferView = info;
3311        } break;
3312        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3313        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3314        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3315        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3316            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3317            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3318            pWDS->pBufferInfo = info;
3319        } break;
3320        default:
3321            return true;
3323        }
3324        break;
3325    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3326        pCDS = new VkCopyDescriptorSet;
3327        *pNewNode = (GENERIC_HEADER *)pCDS;
3328        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3329        break;
3330    default:
3331        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3332                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3333                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3334                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3335            return true;
3336    }
3337    // Make sure that pNext for the end of shadow copy is NULL
3338    (*pNewNode)->pNext = NULL;
3339    return skipCall;
3340}
3341
3342// Verify that given sampler is valid
3343static bool validateSampler(const layer_data *my_data, const VkSampler *pSampler, const bool immutable) {
3344    bool skipCall = false;
3345    auto sampIt = my_data->sampleMap.find(*pSampler);
3346    if (sampIt == my_data->sampleMap.end()) {
3347        if (!immutable) {
3348            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3349                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3350                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3351                                (uint64_t)*pSampler);
3352        } else { // immutable
3353            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3354                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3355                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3356                                "sampler %#" PRIxLEAST64,
3357                                (uint64_t)*pSampler);
3358        }
3359    } else {
3360        // TODO : Any further checks we want to do on the sampler?
3361    }
3362    return skipCall;
3363}
3364
3365// TODO : Consolidate the overlapping FindLayout/SetLayout helper functions below
3366bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3367    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3368    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3369        return false;
3370    }
3371    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3372    imgpair.subresource.aspectMask = aspectMask;
3373    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3374    if (imgsubIt == pCB->imageLayoutMap.end()) {
3375        return false;
3376    }
3377    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3378        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3379                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3380                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3381                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3382    }
3383    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3384        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3385                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3386                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3387                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3388    }
3389    node = imgsubIt->second;
3390    return true;
3391}
3392
3393bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3394    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3395        return false;
3396    }
3397    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3398    imgpair.subresource.aspectMask = aspectMask;
3399    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3400    if (imgsubIt == my_data->imageLayoutMap.end()) {
3401        return false;
3402    }
3403    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3404        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3405                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3406                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3407                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3408    }
3409    layout = imgsubIt->second.layout;
3410    return true;
3411}
3412
3413// find layout(s) on the cmd buf level
3414bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3415    ImageSubresourcePair imgpair = {image, true, range};
3416    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3417    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3418    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3419    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3420    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3421    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3422        imgpair = {image, false, VkImageSubresource()};
3423        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3424        if (imgsubIt == pCB->imageLayoutMap.end())
3425            return false;
3426        node = imgsubIt->second;
3427    }
3428    return true;
3429}
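    // Usage sketch (illustrative): query the layout recorded for a single color subresource
    // in this command buffer, falling back to the whole-image entry when needed:
    //   IMAGE_CMD_BUF_LAYOUT_NODE node;
    //   VkImageSubresource sub = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0}; // mip 0, layer 0
    //   if (FindLayout(pCB, image, sub, node)) { /* node.layout / node.initialLayout valid */ }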
3430
3431// find layout(s) on the global level
3432bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3433    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3434    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3435    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3436    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3437    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3438    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3439        imgpair = {imgpair.image, false, VkImageSubresource()};
3440        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3441        if (imgsubIt == my_data->imageLayoutMap.end())
3442            return false;
3443        layout = imgsubIt->second.layout;
3444    }
3445    return true;
3446}
3447
3448bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3449    ImageSubresourcePair imgpair = {image, true, range};
3450    return FindLayout(my_data, imgpair, layout);
3451}
3452
3453bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3454    auto sub_data = my_data->imageSubresourceMap.find(image);
3455    if (sub_data == my_data->imageSubresourceMap.end())
3456        return false;
3457    auto imgIt = my_data->imageMap.find(image);
3458    if (imgIt == my_data->imageMap.end())
3459        return false;
3460    bool ignoreGlobal = false;
3461    // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3462    // potential errors in this case.
3463    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3464        ignoreGlobal = true;
3465    }
3466    for (auto imgsubpair : sub_data->second) {
3467        if (ignoreGlobal && !imgsubpair.hasSubresource)
3468            continue;
3469        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3470        if (img_data != my_data->imageLayoutMap.end()) {
3471            layouts.push_back(img_data->second.layout);
3472        }
3473    }
3474    return true;
3475}
3476
3477// Set the layout on the global level
3478void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3479    VkImage &image = imgpair.image;
3480    // TODO (mlentine): Maybe set format if new? Not used atm.
3481    my_data->imageLayoutMap[imgpair].layout = layout;
3482    // TODO (mlentine): Maybe make vector a set?
3483    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3484    if (subresource == my_data->imageSubresourceMap[image].end()) {
3485        my_data->imageSubresourceMap[image].push_back(imgpair);
3486    }
3487}
3488
3489// Set the layout on the cmdbuf level
3490void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3491    pCB->imageLayoutMap[imgpair] = node;
3492    // TODO (mlentine): Maybe make vector a set?
3493    auto subresource =
3494        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3495    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3496        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3497    }
3498}
3499
3500void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3501    // TODO (mlentine): Maybe make vector a set?
3502    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3503        pCB->imageSubresourceMap[imgpair.image].end()) {
3504        pCB->imageLayoutMap[imgpair].layout = layout;
3505    } else {
3506        // TODO (mlentine): Could be expensive and might need to be removed.
3507        assert(imgpair.hasSubresource);
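            // First record of this subresource in the CB: keep any previously found
            // initialLayout, otherwise treat 'layout' as both initial and current.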
3508        IMAGE_CMD_BUF_LAYOUT_NODE node;
3509        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3510            node.initialLayout = layout;
3511        }
3512        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3513    }
3514}
3515
3516template <class OBJECT, class LAYOUT>
3517void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3518    if (imgpair.subresource.aspectMask & aspectMask) {
3519        imgpair.subresource.aspectMask = aspectMask;
3520        SetLayout(pObject, imgpair, layout);
3521    }
3522}
3523
3524template <class OBJECT, class LAYOUT>
3525void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3526    ImageSubresourcePair imgpair = {image, true, range};
3527    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3528    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3529    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3530    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3531}
3532
3533template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3534    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3535    SetLayout(pObject, imgpair, layout);
3536}
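    // Usage sketch (illustrative): record that mip 0 / layer 0 of the color aspect of
    // 'image' transitioned to TRANSFER_DST within command buffer 'pCB':
    //   VkImageSubresource sub = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0};
    //   SetLayout(pCB, image, sub, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);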
3537
3538void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3539    auto image_view_data = dev_data->imageViewMap.find(imageView);
3540    assert(image_view_data != dev_data->imageViewMap.end());
3541    const VkImage &image = image_view_data->second.image;
3542    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3543    // TODO: Do not iterate over every possibility - consolidate where possible
3544    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3545        uint32_t level = subRange.baseMipLevel + j;
3546        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3547            uint32_t layer = subRange.baseArrayLayer + k;
3548            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3549            SetLayout(pCB, image, sub, layout);
3550        }
3551    }
3552}
3553
3554// Verify that given imageView is valid
3555static bool validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3556    bool skipCall = false;
3557    auto ivIt = my_data->imageViewMap.find(*pImageView);
3558    if (ivIt == my_data->imageViewMap.end()) {
3559        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3560                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3561                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3562                            (uint64_t)*pImageView);
3563    } else {
3564        // Validate that imageLayout is compatible with aspectMask and image format
3565        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3566        VkImage image = ivIt->second.image;
3567        // TODO : Check here in case we have a bad image
3568        VkFormat format = VK_FORMAT_MAX_ENUM;
3569        auto imgIt = my_data->imageMap.find(image);
3570        if (imgIt != my_data->imageMap.end()) {
3571            format = (*imgIt).second.createInfo.format;
3572        } else {
3573            // Also need to check the swapchains.
3574            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3575            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3576                VkSwapchainKHR swapchain = swapchainIt->second;
3577                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3578                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3579                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3580                    format = pswapchain_node->createInfo.imageFormat;
3581                }
3582            }
3583        }
3584        if (format == VK_FORMAT_MAX_ENUM) {
3585            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3586                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3587                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3588                                " in imageView %#" PRIxLEAST64,
3589                                (uint64_t)image, (uint64_t)*pImageView);
3590        } else {
3591            bool ds = vk_format_is_depth_or_stencil(format);
3592            switch (imageLayout) {
3593            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3594                // Only Color bit must be set
3595                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3596                    skipCall |=
3597                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3598                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3599                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3600                                "and imageView %#" PRIxLEAST64 ""
3601                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3602                                (uint64_t)*pImageView);
3603                }
3604                // format must NOT be DS
3605                if (ds) {
3606                    skipCall |=
3607                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3608                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3609                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3610                                "and imageView %#" PRIxLEAST64 ""
3611                                " but the image format is %s which is not a color format.",
3612                                (uint64_t)*pImageView, string_VkFormat(format));
3613                }
3614                break;
3615            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3616            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3617                // Depth or stencil bit must be set, but both must NOT be set
3618                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3619                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3620                        // both must NOT be set
3621                        skipCall |=
3622                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3623                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3624                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3625                                    " that has both STENCIL and DEPTH aspects set",
3626                                    (uint64_t)*pImageView);
3627                    }
3628                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3629                    // Neither aspect was set
3630                    skipCall |=
3631                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3632                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3633                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3634                                " that does not have STENCIL or DEPTH aspect set.",
3635                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3636                }
3637                // format must be DS
3638                if (!ds) {
3639                    skipCall |=
3640                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3641                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3642                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3643                                " but the image format is %s which is not a depth/stencil format.",
3644                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3645                }
3646                break;
3647            default:
3648                // anything to check for other layouts?
3649                break;
3650            }
3651        }
3652    }
3653    return skipCall;
3654}
3655
3656// Verify that given bufferView is valid
3657static bool validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3658    bool skipCall = false;
3659    auto bvIt = my_data->bufferViewMap.find(*pBufferView);
3660    if (bvIt == my_data->bufferViewMap.end()) {
3661        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3662                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3663                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3664                            (uint64_t)*pBufferView);
3665    } else {
3666        // TODO : Any further checks we want to do on the bufferView?
3667    }
3668    return skipCall;
3669}
3670
3671// Verify that given bufferInfo is valid
3672static bool validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3673    bool skipCall = false;
3674    auto bufferIt = my_data->bufferMap.find(pBufferInfo->buffer);
3675    if (bufferIt == my_data->bufferMap.end()) {
3676        skipCall |=
3677            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3678                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3679                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3680                    (uint64_t)pBufferInfo->buffer);
3681    } else {
3682        // TODO : Any further checks we want to do on the buffer?
3683    }
3684    return skipCall;
3685}
3686
3687static bool validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3688                                   const VkSampler *pImmutableSamplers) {
3689    bool skipCall = false;
3690    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3691    const VkSampler *pSampler = NULL;
3692    bool immutable = false;
3693    uint32_t i = 0;
3694    // For given update type, verify that update contents are correct
3695    switch (pWDS->descriptorType) {
3696    case VK_DESCRIPTOR_TYPE_SAMPLER:
3697        for (i = 0; i < pWDS->descriptorCount; ++i) {
3698            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3699        }
3700        break;
3701    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3702        for (i = 0; i < pWDS->descriptorCount; ++i) {
3703            if (NULL == pImmutableSamplers) {
3704                pSampler = &(pWDS->pImageInfo[i].sampler);
3705                if (immutable) {
3706                    skipCall |= log_msg(
3707                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3708                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3709                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3710                        ", but previous update(s) from this "
3711                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3712                        "use immutable or non-immutable samplers.",
3713                        i, (uint64_t)*pSampler);
3714                }
3715            } else {
3716                if (i > 0 && !immutable) {
3717                    skipCall |= log_msg(
3718                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3719                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3720                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3721                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3722                        "use immutable or non-immutable samplers.",
3723                        i);
3724                }
3725                immutable = true;
3726                pSampler = &(pImmutableSamplers[i]);
3727            }
3728            skipCall |= validateSampler(my_data, pSampler, immutable);
3729        }
3730    // Intentionally fall through here to also validate the image view and layout info
3731    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3732    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3733    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3734        for (i = 0; i < pWDS->descriptorCount; ++i) {
3735            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3736        }
3737        break;
3738    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3739    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3740        for (i = 0; i < pWDS->descriptorCount; ++i) {
3741            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3742        }
3743        break;
3744    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3745    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3746    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3747    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3748        for (i = 0; i < pWDS->descriptorCount; ++i) {
3749            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3750        }
3751        break;
3752    default:
3753        break;
3754    }
3755    return skipCall;
3756}
3757// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3758// func_str is the name of the calling function
3759// Return false if no errors occur
3760// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3761static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3762    bool skip_call = false;
3763    auto set_node = my_data->setMap.find(set);
3764    if (set_node == my_data->setMap.end()) {
3765        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3766                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3767                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3768                             (uint64_t)(set));
3769    } else {
3770        if (set_node->second->in_use.load()) {
3771            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3772                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3773                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3774                                 func_str.c_str(), (uint64_t)(set));
3775        }
3776    }
3777    return skip_call;
3778}
3779static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
3780    // Flag any CBs this set is bound to as INVALID
3781    for (auto cb : pSet->boundCmdBuffers) {
3782        auto cb_node = dev_data->commandBufferMap.find(cb);
3783        if (cb_node != dev_data->commandBufferMap.end()) {
3784            cb_node->second->state = CB_INVALID;
3785        }
3786    }
3787}
3788// update DS mappings based on write and copy update arrays
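    // Flow for each write update: verify the set is idle, the update struct type is valid,
    // the target binding exists in the set's layout, and the update fits that binding with
    // a consistent descriptor type; then shadow the update and point the affected
    // descriptors at the shadow node. Copy updates receive analogous bounds/type checks.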
3789static bool dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
3790                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
3791    bool skipCall = false;
3792    // Validate Write updates
3793    uint32_t i = 0;
3794    for (i = 0; i < descriptorWriteCount; i++) {
3795        VkDescriptorSet ds = pWDS[i].dstSet;
3796        SET_NODE *pSet = my_data->setMap[ds];
3797        // Set being updated cannot be in-flight
3798        if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == true)
3799            return skipCall;
3800        // If set is bound to any cmdBuffers, mark them invalid
3801        invalidateBoundCmdBuffers(my_data, pSet);
3802        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
3803        auto layout_node = pSet->p_layout;
3804        // First verify valid update struct
3805        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == true) {
3806            break;
3807        }
3808        uint32_t binding = 0, endIndex = 0;
3809        binding = pWDS[i].dstBinding;
3810        // Make sure that layout being updated has the binding being updated
3811        if (!layout_node->HasBinding(binding)) {
3812            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3813                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3814                                "Descriptor Set %" PRIu64 " does not have binding to match "
3815                                "update binding %u for update type "
3816                                "%s!",
3817                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
3818        } else {
3819            // Next verify that update falls within size of given binding
3820            endIndex = getUpdateEndIndex(my_data, device, layout_node->GetGlobalStartIndexFromBinding(binding),
3821                                         pWDS[i].dstArrayElement, pUpdate);
3822            if (layout_node->GetGlobalEndIndexFromBinding(binding) < endIndex) {
3823                auto ds_layout = layout_node->GetDescriptorSetLayout();
3824                skipCall |=
3825                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3826                            reinterpret_cast<uint64_t &>(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3827                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout %" PRIu64 "!",
3828                            string_VkStructureType(pUpdate->sType), binding, reinterpret_cast<uint64_t &>(ds_layout));
3829            } else { // TODO : should we skip update on a type mismatch or force it?
3830                uint32_t startIndex;
3831                startIndex = getUpdateStartIndex(my_data, device, layout_node->GetGlobalStartIndexFromBinding(binding),
3832                                                 pWDS[i].dstArrayElement, pUpdate);
3833                auto layout_binding = layout_node->GetDescriptorSetLayoutBindingPtrFromBinding(binding);
3834                // Layout bindings match w/ update, now verify that update type & stageFlags are the same for entire update
3835                if ((skipCall = validateUpdateConsistency(my_data, device, layout_binding->descriptorType, pUpdate, startIndex,
3836                                                          endIndex)) == false) {
3837                    // The update is within bounds and consistent, but need to
3838                    // make sure contents make sense as well
3839                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i], layout_binding->pImmutableSamplers)) == false) {
3840                        // Update is good. Save the update info
3841                        // Create new update struct for this set's shadow copy
3842                        GENERIC_HEADER *pNewNode = NULL;
3843                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
3844                        if (NULL == pNewNode) {
3845                            skipCall |= log_msg(
3846                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3847                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
3848                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
3849                        } else {
3850                            // Insert shadow node into LL of updates for this set
3851                            pNewNode->pNext = pSet->pUpdateStructs;
3852                            pSet->pUpdateStructs = pNewNode;
3853                            // Now update appropriate descriptor(s) to point to new Update node
3854                            for (uint32_t j = startIndex; j <= endIndex; j++) {
3855                                assert(j < pSet->descriptorCount);
3856                                pSet->pDescriptorUpdates[j] = pNewNode;
3857                            }
3858                        }
3859                    }
3860                }
3861            }
3862        }
3863    }
3864    // Now validate copy updates
3865    for (i = 0; i < descriptorCopyCount; ++i) {
3866        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
3867        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
3868        // For each copy make sure that update falls within given layout and that types match
3869        pSrcSet = my_data->setMap[pCDS[i].srcSet];
3870        pDstSet = my_data->setMap[pCDS[i].dstSet];
3871        // Set being updated cannot be in-flight
3872        if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == true)
3873            return skipCall;
3874        invalidateBoundCmdBuffers(my_data, pDstSet);
3875        auto src_layout_node = pSrcSet->p_layout;
3876        auto dst_layout_node = pDstSet->p_layout;
3877        // Validate that src binding is valid for src set layout
3878        if (!src_layout_node->HasBinding(pCDS[i].srcBinding)) {
3879            auto s_layout = src_layout_node->GetDescriptorSetLayout();
3880            skipCall |=
3881                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3882                        (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3883                        "Copy descriptor update %u has srcBinding %u "
3884                        "which is out of bounds for underlying SetLayout "
3885                        "%#" PRIxLEAST64 " which only has bindings 0-%u.",
3886                        i, pCDS[i].srcBinding, reinterpret_cast<uint64_t &>(s_layout), src_layout_node->GetBindingCount() - 1);
3887        } else if (!dst_layout_node->HasBinding(pCDS[i].dstBinding)) {
3888            auto d_layout = dst_layout_node->GetDescriptorSetLayout();
3889            skipCall |=
3890                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3891                        (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3892                        "Copy descriptor update %u has dstBinding %u "
3893                        "which is out of bounds for underlying SetLayout "
3894                        "%#" PRIxLEAST64 " which only has bindings 0-%u.",
3895                        i, pCDS[i].dstBinding, reinterpret_cast<uint64_t &>(d_layout), dst_layout_node->GetBindingCount() - 1);
3896        } else {
3897            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout and binding
3898            srcEndIndex = getUpdateEndIndex(my_data, device, src_layout_node->GetGlobalStartIndexFromBinding(pCDS[i].srcBinding),
3899                                            pCDS[i].srcArrayElement, (const GENERIC_HEADER *)&(pCDS[i]));
3900            dstEndIndex = getUpdateEndIndex(my_data, device, dst_layout_node->GetGlobalStartIndexFromBinding(pCDS[i].dstBinding),
3901                                            pCDS[i].dstArrayElement, (const GENERIC_HEADER *)&(pCDS[i]));
3902            if (src_layout_node->GetGlobalEndIndexFromBinding(pCDS[i].srcBinding) < srcEndIndex) {
3903                auto s_layout = src_layout_node->GetDescriptorSetLayout();
3904                skipCall |=
3905                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3906                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3907                            "Copy descriptor src update is out of bounds for matching binding %u in Layout %" PRIu64 "!",
3908                            pCDS[i].srcBinding, reinterpret_cast<uint64_t &>(s_layout));
3909            } else if (dst_layout_node->GetGlobalEndIndexFromBinding(pCDS[i].dstBinding) < dstEndIndex) {
3910                auto d_layout = dst_layout_node->GetDescriptorSetLayout();
3911                skipCall |=
3912                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3913                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3914                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout %" PRIu64 "!",
3915                            pCDS[i].dstBinding, reinterpret_cast<uint64_t &>(d_layout));
3916            } else {
3917                srcStartIndex =
3918                    getUpdateStartIndex(my_data, device, src_layout_node->GetGlobalStartIndexFromBinding(pCDS[i].srcBinding),
3919                                        pCDS[i].srcArrayElement, (const GENERIC_HEADER *)&(pCDS[i]));
3920                dstStartIndex =
3921                    getUpdateStartIndex(my_data, device, dst_layout_node->GetGlobalStartIndexFromBinding(pCDS[i].dstBinding),
3922                                        pCDS[i].dstArrayElement, (const GENERIC_HEADER *)&(pCDS[i]));
3923                auto s_binding = src_layout_node->GetDescriptorSetLayoutBindingPtrFromBinding(pCDS[i].srcBinding);
3924                auto d_binding = dst_layout_node->GetDescriptorSetLayoutBindingPtrFromBinding(pCDS[i].dstBinding);
3925                // For copy, just make sure types match and then perform update
3926                if (s_binding->descriptorType != d_binding->descriptorType) {
3927                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3928                                        __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3929                                        "Copy descriptor update index %u, has src update descriptor type %s "
3930                                        "that does not match overlapping dest descriptor type of %s!",
3931                                        i, string_VkDescriptorType(s_binding->descriptorType),
3932                                        string_VkDescriptorType(d_binding->descriptorType));
3933                } else {
3934                    for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
3935                        // point dst descriptor at corresponding src descriptor
3936                        // TODO : This may be a hole. I believe copy should be its own copy,
3937                        //  otherwise a subsequent write update to src will incorrectly affect the copy
3938                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
3939                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
3940                    }
3941                }
3942            }
3943        }
3944    }
3945    return skipCall;
3946}
3947
3948// Verify that given pool has descriptors that are being requested for allocation.
3949// NOTE : Calls to this function should be wrapped in mutex
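    // Worked example (illustrative): a pool created with maxSets = 2 and 4 UNIFORM_BUFFER
    // descriptors can satisfy one allocation of two layouts needing 2 UNIFORM_BUFFER
    // descriptors each; any further allocation trips DRAWSTATE_DESCRIPTOR_POOL_EMPTY.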
3950static bool validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
3951                                                     const VkDescriptorSetLayout *pSetLayouts) {
3952    bool skipCall = false;
3953    uint32_t i = 0;
3954    uint32_t j = 0;
3955
3956    // Track number of descriptorSets allowable in this pool
3957    if (pPoolNode->availableSets < count) {
3958        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
3959                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
3960                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
3961                            ". This pool only has %d descriptorSets remaining.",
3962                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
3963    } else {
3964        pPoolNode->availableSets -= count;
3965    }
3966
3967    for (i = 0; i < count; ++i) {
3968        auto layout_pair = dev_data->descriptorSetLayoutMap.find(pSetLayouts[i]);
3969        if (layout_pair == dev_data->descriptorSetLayoutMap.end()) {
3970            skipCall |=
3971                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
3972                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3973                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
3974                        (uint64_t)pSetLayouts[i]);
3975        } else {
3976            uint32_t typeIndex = 0, poolSizeCount = 0;
3977            auto layout_node = layout_pair->second;
3978            for (j = 0; j < layout_node.GetBindingCount(); ++j) {
3979                auto binding_layout = layout_node.GetDescriptorSetLayoutBindingPtrFromIndex(j);
3980                typeIndex = static_cast<uint32_t>(binding_layout->descriptorType);
3981                poolSizeCount = binding_layout->descriptorCount;
3982                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
3983                    skipCall |= log_msg(
3984                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
3985                        reinterpret_cast<const uint64_t &>(pSetLayouts[i]), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
3986                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
3987                        ". This pool only has %d descriptors of this type remaining.",
3988                        poolSizeCount, string_VkDescriptorType(binding_layout->descriptorType), (uint64_t)pPoolNode->pool,
3989                        pPoolNode->availableDescriptorTypeCount[typeIndex]);
3990                } else { // Decrement available descriptors of this type
3991                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
3992                }
3993            }
3994        }
3995    }
3996    return skipCall;
3997}
3998
3999// Free the shadowed update node for this Set
4000// NOTE : Calls to this function should be wrapped in mutex
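    // NOTE : The delete[] calls below must mirror the per-type array allocations made in shadowUpdateNode()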
4001static void freeShadowUpdateTree(SET_NODE *pSet) {
4002    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4003    pSet->pUpdateStructs = NULL;
4004    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4005    // Clear the descriptor mappings as they will now be invalid
4006    pSet->pDescriptorUpdates.clear();
4007    while (pShadowUpdate) {
4008        pFreeUpdate = pShadowUpdate;
4009        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4010        VkWriteDescriptorSet *pWDS = NULL;
4011        switch (pFreeUpdate->sType) {
4012        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4013            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4014            switch (pWDS->descriptorType) {
4015            case VK_DESCRIPTOR_TYPE_SAMPLER:
4016            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4017            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4018            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4019                delete[] pWDS->pImageInfo;
4020            } break;
4021            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4022            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4023                delete[] pWDS->pTexelBufferView;
4024            } break;
4025            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4026            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4027            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4028            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4029                delete[] pWDS->pBufferInfo;
4030            } break;
4031            default:
4032                break;
4033            }
4034            break;
4035        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4036            break;
4037        default:
4038            assert(0);
4039            break;
4040        }
4041        delete pFreeUpdate;
4042    }
4043}
4044
4045// Free all DS Pools including their Sets & related sub-structs
4046// NOTE : Calls to this function should be wrapped in mutex
4047static void deletePools(layer_data *my_data) {
4048    if (my_data->descriptorPoolMap.empty())
4049        return;
4050    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4051        SET_NODE *pSet = (*ii).second->pSets;
4052        SET_NODE *pFreeSet = pSet;
4053        while (pSet) {
4054            pFreeSet = pSet;
4055            pSet = pSet->pNext;
4056            // Free Update shadow struct tree
4057            freeShadowUpdateTree(pFreeSet);
4058            delete pFreeSet;
4059        }
4060        delete (*ii).second;
4061    }
4062    my_data->descriptorPoolMap.clear();
4063}
4064
4065// Currently, clearing a set removes all previous updates to that set
4066//  TODO : Validate if this is correct clearing behavior
4067static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4068    SET_NODE *pSet = getSetNode(my_data, set);
4069    if (!pSet) {
4070        // TODO : Return error
4071    } else {
4072        freeShadowUpdateTree(pSet);
4073    }
4074}
4075
4076static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4077                                VkDescriptorPoolResetFlags flags) {
4078    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4079    if (!pPool) {
4080        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4081                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4082                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4083    } else {
4084        // TODO: validate flags
4085        // For every set off of this pool, clear it, remove from setMap, and free SET_NODE
4086        SET_NODE *pSet = pPool->pSets;
4087        SET_NODE *pFreeSet = pSet;
4088        while (pSet) {
4089            clearDescriptorSet(my_data, pSet->set);
4090            my_data->setMap.erase(pSet->set);
4091            pFreeSet = pSet;
4092            pSet = pSet->pNext;
4093            delete pFreeSet;
4094        }
4095        pPool->pSets = nullptr;
4096        // Reset available count for each type and available sets for this pool
4097        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4098            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4099        }
4100        pPool->availableSets = pPool->maxSets;
4101    }
4102}
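
// Worked example of the reset bookkeeping above (numbers are illustrative):
// a pool created with maxSets = 8 and 16 UNIFORM_BUFFER descriptors that has
// 3 sets allocated reports availableSets == 5 before the reset; afterwards
// availableSets == 8 and the UNIFORM_BUFFER entry of
// availableDescriptorTypeCount is back to 16, as if the pool were new.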
4103
4104// For given CB object, fetch associated CB Node from map
4105static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4106    if (my_data->commandBufferMap.count(cb) == 0) {
4107        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4108                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4109                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
4110        return NULL;
4111    }
4112    return my_data->commandBufferMap[cb];
4113}
4114
4115// Free all CB Nodes
4116// NOTE : Calls to this function should be wrapped in mutex
4117static void deleteCommandBuffers(layer_data *my_data) {
4118    if (my_data->commandBufferMap.empty()) {
4119        return;
4120    }
4121    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4122        delete (*ii).second;
4123    }
4124    my_data->commandBufferMap.clear();
4125}
4126
4127static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4128    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4129                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4130                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4131}
4132
4133bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4134    if (!pCB->activeRenderPass)
4135        return false;
4136    bool skip_call = false;
4137    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4138        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4139                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4140                             "Commands cannot be called in a subpass using secondary command buffers.");
4141    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4142        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4143                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4144                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4145    }
4146    return skip_call;
4147}
4148
4149static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4150    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4151        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4152                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4153                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4154    return false;
4155}
4156
4157static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4158    if (!(flags & VK_QUEUE_COMPUTE_BIT))
4159        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4160                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4161                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4162    return false;
4163}
4164
4165static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4166    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4167        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4168                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
4170    return false;
4171}
4172
4173// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4174//  in the recording state or if there's an issue with the Cmd ordering
4175static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4176    bool skipCall = false;
4177    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4178    if (pool_data != my_data->commandPoolMap.end()) {
4179        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4180        switch (cmd) {
4181        case CMD_BINDPIPELINE:
4182        case CMD_BINDPIPELINEDELTA:
4183        case CMD_BINDDESCRIPTORSETS:
4184        case CMD_FILLBUFFER:
4185        case CMD_CLEARCOLORIMAGE:
4186        case CMD_SETEVENT:
4187        case CMD_RESETEVENT:
4188        case CMD_WAITEVENTS:
4189        case CMD_BEGINQUERY:
4190        case CMD_ENDQUERY:
4191        case CMD_RESETQUERYPOOL:
4192        case CMD_COPYQUERYPOOLRESULTS:
4193        case CMD_WRITETIMESTAMP:
4194            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4195            break;
4196        case CMD_SETVIEWPORTSTATE:
4197        case CMD_SETSCISSORSTATE:
4198        case CMD_SETLINEWIDTHSTATE:
4199        case CMD_SETDEPTHBIASSTATE:
4200        case CMD_SETBLENDSTATE:
4201        case CMD_SETDEPTHBOUNDSSTATE:
4202        case CMD_SETSTENCILREADMASKSTATE:
4203        case CMD_SETSTENCILWRITEMASKSTATE:
4204        case CMD_SETSTENCILREFERENCESTATE:
4205        case CMD_BINDINDEXBUFFER:
4206        case CMD_BINDVERTEXBUFFER:
4207        case CMD_DRAW:
4208        case CMD_DRAWINDEXED:
4209        case CMD_DRAWINDIRECT:
4210        case CMD_DRAWINDEXEDINDIRECT:
4211        case CMD_BLITIMAGE:
4212        case CMD_CLEARATTACHMENTS:
4213        case CMD_CLEARDEPTHSTENCILIMAGE:
4214        case CMD_RESOLVEIMAGE:
4215        case CMD_BEGINRENDERPASS:
4216        case CMD_NEXTSUBPASS:
4217        case CMD_ENDRENDERPASS:
4218            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4219            break;
4220        case CMD_DISPATCH:
4221        case CMD_DISPATCHINDIRECT:
4222            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4223            break;
4224        case CMD_COPYBUFFER:
4225        case CMD_COPYIMAGE:
4226        case CMD_COPYBUFFERTOIMAGE:
4227        case CMD_COPYIMAGETOBUFFER:
4228        case CMD_CLONEIMAGEDATA:
4229        case CMD_UPDATEBUFFER:
4230        case CMD_PIPELINEBARRIER:
4231        case CMD_EXECUTECOMMANDS:
4232            break;
4233        default:
4234            break;
4235        }
4236    }
    if (pCB->state != CB_RECORDING) {
        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // init cmd node and append to end of cmd list
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
4246    return skipCall;
4247}
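// Usage sketch (hypothetical hook, not part of this file): every vkCmd* entry
// point follows the same shape -- fetch the CB node, let addCmd() check the
// pool's queue capabilities and the recording state, and only forward down the
// dispatch chain when nothing was flagged.
static void sketchCmdDrawHook(layer_data *dev_data, VkCommandBuffer commandBuffer) {
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB && !addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()")) {
        // dev_data->device_dispatch_table->CmdDraw(commandBuffer, ...); // forward to the next layer
    }
}
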
4248// Reset the command buffer state
4249//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4250static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
4251    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
4252    if (pCB) {
4253        pCB->in_use.store(0);
4254        pCB->cmds.clear();
4255        // Reset CB state (note that createInfo is not cleared)
4256        pCB->commandBuffer = cb;
4257        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4258        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4259        pCB->numCmds = 0;
4260        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4261        pCB->state = CB_NEW;
4262        pCB->submitCount = 0;
4263        pCB->status = 0;
4264        pCB->viewports.clear();
4265        pCB->scissors.clear();
4266
4267        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4268            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
4269            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4270                auto set_node = dev_data->setMap.find(set);
4271                if (set_node != dev_data->setMap.end()) {
4272                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
4273                }
4274            }
4275            pCB->lastBound[i].reset();
4276        }
4277
4278        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4279        pCB->activeRenderPass = 0;
4280        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4281        pCB->activeSubpass = 0;
4282        pCB->lastSubmittedFence = VK_NULL_HANDLE;
4283        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
4284        pCB->destroyedSets.clear();
4285        pCB->updatedSets.clear();
4286        pCB->destroyedFramebuffers.clear();
4287        pCB->waitedEvents.clear();
4288        pCB->semaphores.clear();
4289        pCB->events.clear();
4290        pCB->waitedEventsBeforeQueryReset.clear();
4291        pCB->queryToStateMap.clear();
4292        pCB->activeQueries.clear();
4293        pCB->startedQueries.clear();
4294        pCB->imageSubresourceMap.clear();
4295        pCB->imageLayoutMap.clear();
4296        pCB->eventToStageMap.clear();
4297        pCB->drawData.clear();
4298        pCB->currentDrawData.buffers.clear();
4299        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4300        // Make sure any secondaryCommandBuffers are removed from globalInFlight
4301        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
4302            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
4303        }
4304        pCB->secondaryCommandBuffers.clear();
4305        pCB->updateImages.clear();
4306        pCB->updateBuffers.clear();
4307        clear_cmd_buf_and_mem_references(dev_data, pCB);
4308        pCB->eventUpdates.clear();
4309
4310        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
4311        for (auto framebuffer : pCB->framebuffers) {
4312            auto fbNode = dev_data->frameBufferMap.find(framebuffer);
4313            if (fbNode != dev_data->frameBufferMap.end()) {
4314                fbNode->second.referencingCmdBuffers.erase(pCB->commandBuffer);
4315            }
4316        }
4317        pCB->framebuffers.clear();
4318
4319    }
4320}
4321
4322// Set PSO-related status bits for CB, including dynamic state set via PSO
4323static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4324    // Account for any dynamic state not set via this PSO
4325    if (!pPipe->graphicsPipelineCI.pDynamicState ||
4326        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
4327        pCB->status = CBSTATUS_ALL;
4328    } else {
4329        // First consider all state on
4330        // Then unset any state that's noted as dynamic in PSO
4331        // Finally OR that into CB statemask
4332        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4333        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
4334            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
4335            case VK_DYNAMIC_STATE_VIEWPORT:
4336                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4337                break;
4338            case VK_DYNAMIC_STATE_SCISSOR:
4339                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4340                break;
4341            case VK_DYNAMIC_STATE_LINE_WIDTH:
4342                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4343                break;
4344            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4345                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4346                break;
4347            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4348                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4349                break;
4350            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4351                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4352                break;
4353            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4354                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4355                break;
4356            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4357                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4358                break;
4359            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4360                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4361                break;
4362            default:
4363                // TODO : Flag error here
4364                break;
4365            }
4366        }
4367        pCB->status |= psoDynStateMask;
4368    }
4369}
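
// Worked example of the masking above, assuming a PSO whose only dynamic
// states are VK_DYNAMIC_STATE_VIEWPORT and VK_DYNAMIC_STATE_SCISSOR:
//
//     CBStatusFlags mask = CBSTATUS_ALL;                       // everything "set"
//     mask &= ~(CBSTATUS_VIEWPORT_SET | CBSTATUS_SCISSOR_SET); // drop the dynamic bits
//     pCB->status |= mask; // all static state is satisfied; the viewport and
//                          // scissor bits are set later by vkCmdSetViewport()
//                          // and vkCmdSetScissor()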
4370
4371// Print the last bound Gfx Pipeline
4372static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4373    bool skipCall = false;
4374    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4375    if (pCB) {
4376        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (pPipeTrav) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                vk_print_vkgraphicspipelinecreateinfo(
                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
                                    .c_str());
        }
4386    }
4387    return skipCall;
4388}
4389
4390static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4391    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && !pCB->cmds.empty()) {
4393        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4394                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
4395        vector<CMD_NODE> cmds = pCB->cmds;
4396        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4397            // TODO : Need to pass cb as srcObj here
4398            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4399                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4400        }
    }
4404}
4405
4406static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4407    bool skipCall = false;
4408    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4409        return skipCall;
4410    }
4411    skipCall |= printPipeline(my_data, cb);
4412    return skipCall;
4413}
4414
4415// Flags validation error if the associated call is made inside a render pass. The apiName
4416// routine should ONLY be called outside a render pass.
4417static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4418    bool inside = false;
4419    if (pCB->activeRenderPass) {
4420        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4421                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4422                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4423                         (uint64_t)pCB->activeRenderPass);
4424    }
4425    return inside;
4426}
4427
4428// Flags validation error if the associated call is made outside a render pass. The apiName
4429// routine should ONLY be called inside a render pass.
4430static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4431    bool outside = false;
4432    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4433        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4434         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4435        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4436                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4437                          "%s: This call must be issued inside an active render pass.", apiName);
4438    }
4439    return outside;
4440}
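
// Pairing sketch (hypothetical caller, not in the original source): transfer
// style commands gate on insideRenderPass() while draw-style commands gate on
// outsideRenderPass(), so each API is flagged exactly when it is recorded in
// the wrong scope.
static bool sketchValidateRenderPassScope(const layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip = insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");  // must be outside a render pass
    skip |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");           // must be inside a render pass
    return skip;
}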
4441
static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}
4447
4448VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4449vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4450    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4451
4452    assert(chain_info->u.pLayerInfo);
4453    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4454    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4455    if (fpCreateInstance == NULL)
4456        return VK_ERROR_INITIALIZATION_FAILED;
4457
4458    // Advance the link info for the next element on the chain
4459    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4460
4461    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4462    if (result != VK_SUCCESS)
4463        return result;
4464
4465    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4466    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4467    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
4468
4469    instance_data->report_data =
4470        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
4471                                     pCreateInfo->ppEnabledExtensionNames);
4472
4473    init_core_validation(instance_data, pAllocator);
4474
4475    ValidateLayerOrdering(*pCreateInfo);
4476
4477    return result;
4478}
4479
4480/* hook DestroyInstance to remove tableInstanceMap entry */
4481VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4482    // TODOSC : Shouldn't need any customization here
4483    dispatch_key key = get_dispatch_key(instance);
4484    // TBD: Need any locking this early, in case this function is called at the
4485    // same time by more than one thread?
4486    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4487    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4488    pTable->DestroyInstance(instance, pAllocator);
4489
4490    std::lock_guard<std::mutex> lock(global_lock);
4491    // Clean up logging callback, if any
    while (!my_data->logging_callback.empty()) {
4493        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4494        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4495        my_data->logging_callback.pop_back();
4496    }
4497
4498    layer_debug_report_destroy_instance(my_data->report_data);
4499    delete my_data->instance_dispatch_table;
4500    layer_data_map.erase(key);
4501}
4502
4503static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4504    uint32_t i;
4505    // TBD: Need any locking, in case this function is called at the same time
4506    // by more than one thread?
4507    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4508    dev_data->device_extensions.wsi_enabled = false;
4509
4510    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4511    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4512    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4513    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4514    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4515    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4516    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4517
4518    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4519        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4520            dev_data->device_extensions.wsi_enabled = true;
4521    }
4522}
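
// For reference (application-side code, not part of this layer): wsi_enabled
// above only becomes true when the application requests the swapchain
// extension at device creation, e.g.:
//
//     const char *exts[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
//     VkDeviceCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     ci.enabledExtensionCount = 1;
//     ci.ppEnabledExtensionNames = exts;
//     // (queue create info omitted for brevity)
//     vkCreateDevice(gpu, &ci, nullptr, &device);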
4523
4524VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4525                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4526    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4527
4528    assert(chain_info->u.pLayerInfo);
4529    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4530    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4531    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
4532    if (fpCreateDevice == NULL) {
4533        return VK_ERROR_INITIALIZATION_FAILED;
4534    }
4535
4536    // Advance the link info for the next element on the chain
4537    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4538
4539    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4540    if (result != VK_SUCCESS) {
4541        return result;
4542    }
4543
4544    std::unique_lock<std::mutex> lock(global_lock);
4545    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4546    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4547
4548    // Setup device dispatch table
4549    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4550    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4551    my_device_data->device = *pDevice;
4552
4553    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4554    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4555    // Get physical device limits for this device
4556    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4557    uint32_t count;
4558    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4559    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4560    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4561        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4562    // TODO: device limits should make sure these are compatible
4563    if (pCreateInfo->pEnabledFeatures) {
4564        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
4565    } else {
4566        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4567    }
4568    // Store physical device mem limits into device layer_data struct
4569    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4570    lock.unlock();
4571
4572    ValidateLayerOrdering(*pCreateInfo);
4573
4574    return result;
4575}
4576
4577// prototype
4578static void deleteRenderPasses(layer_data *);
4579VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4580    // TODOSC : Shouldn't need any customization here
4581    dispatch_key key = get_dispatch_key(device);
4582    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4583    // Free all the memory
4584    std::unique_lock<std::mutex> lock(global_lock);
4585    deletePipelines(dev_data);
4586    deleteRenderPasses(dev_data);
4587    deleteCommandBuffers(dev_data);
4588    deletePools(dev_data);
4589    dev_data->descriptorSetLayoutMap.clear();
4590    dev_data->imageViewMap.clear();
4591    dev_data->imageMap.clear();
4592    dev_data->imageSubresourceMap.clear();
4593    dev_data->imageLayoutMap.clear();
4594    dev_data->bufferViewMap.clear();
4595    dev_data->bufferMap.clear();
4596    // Queues persist until device is destroyed
4597    dev_data->queueMap.clear();
4598    lock.unlock();
4599#if MTMERGESOURCE
4600    bool skipCall = false;
4601    lock.lock();
4602    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4603            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4604    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4605            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4606    print_mem_list(dev_data);
4607    printCBList(dev_data);
4608    // Report any memory leaks
4609    DEVICE_MEM_INFO *pInfo = NULL;
4610    if (!dev_data->memObjMap.empty()) {
4611        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4612            pInfo = &(*ii).second;
4613            if (pInfo->allocInfo.allocationSize != 0) {
4614                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4615                skipCall |=
4616                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4617                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4618                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
4619                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
4620                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4621            }
4622        }
4623    }
4624    layer_debug_report_destroy_device(device);
4625    lock.unlock();
4626
4627#if DISPATCH_MAP_DEBUG
4628    fprintf(stderr, "Device: %p, key: %p\n", device, key);
4629#endif
4630    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4631    if (!skipCall) {
4632        pDisp->DestroyDevice(device, pAllocator);
4633    }
4634#else
4635    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4636#endif
4637    delete dev_data->device_dispatch_table;
4638    layer_data_map.erase(key);
4639}
4640
4641static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4642
4643VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4644vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
4645    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
4646}
4647
4648VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4649vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
4650    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
4651}
4652
4653// TODO: Why does this exist - can we just use global?
4654static const VkLayerProperties cv_device_layers[] = {{
4655    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
4656}};
4657
4658VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
4659                                                                                    const char *pLayerName, uint32_t *pCount,
4660                                                                                    VkExtensionProperties *pProperties) {
4661    if (pLayerName == NULL) {
4662        dispatch_key key = get_dispatch_key(physicalDevice);
4663        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4664        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
4665    } else {
4666        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
4667    }
4668}
4669
4670VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4671vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
4672    /* draw_state physical device layers are the same as global */
4673    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
4674}
4675
// This validates that the initial layout specified in the command buffer for
// the IMAGE is the same as the global IMAGE layout
4679static bool ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
4680    bool skip_call = false;
4681    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
4682    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
4683    for (auto cb_image_data : pCB->imageLayoutMap) {
4684        VkImageLayout imageLayout;
4685        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4686            skip_call |=
4687                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4688                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
4689                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4690        } else {
4691            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4692                // TODO: Set memory invalid which is in mem_tracker currently
4693            } else if (imageLayout != cb_image_data.second.initialLayout) {
4694                if (cb_image_data.first.hasSubresource) {
4695                    skip_call |= log_msg(
4696                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4697                        reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4698                        "Cannot submit cmd buffer using image (%" PRIx64 ") [sub-resource: array layer %u, mip level %u], "
4699                        "with layout %s when first use is %s.",
4700                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.arrayLayer,
4701                        cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4702                        string_VkImageLayout(cb_image_data.second.initialLayout));
4703                } else {
4704                    skip_call |= log_msg(
4705                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4706                        reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4707                        "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when "
4708                        "first use is %s.",
4709                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4710                        string_VkImageLayout(cb_image_data.second.initialLayout));
4711                }
4712            }
4713            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4714        }
4715    }
4716    return skip_call;
4717}
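
// Example of the mismatch caught above (illustrative): a command buffer whose
// first use of an image expects VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL is
// submitted while the globally tracked layout is still
// VK_IMAGE_LAYOUT_UNDEFINED because no barrier transitioned the image; the
// submit is flagged instead of silently copying from the wrong layout.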
4718
4719// Track which resources are in-flight by atomically incrementing their "in_use" count
4720static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4721    bool skip_call = false;
4722    for (auto drawDataElement : pCB->drawData) {
4723        for (auto buffer : drawDataElement.buffers) {
4724            auto buffer_data = my_data->bufferMap.find(buffer);
4725            if (buffer_data == my_data->bufferMap.end()) {
4726                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4727                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4728                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
4729            } else {
4730                buffer_data->second.in_use.fetch_add(1);
4731            }
4732        }
4733    }
4734    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4735        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4736            auto setNode = my_data->setMap.find(set);
4737            if (setNode == my_data->setMap.end()) {
4738                skip_call |=
4739                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4740                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4741                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
4742            } else {
4743                setNode->second->in_use.fetch_add(1);
4744            }
4745        }
4746    }
4747    for (auto semaphore : pCB->semaphores) {
4748        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4749        if (semaphoreNode == my_data->semaphoreMap.end()) {
4750            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4752                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4753                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
4754        } else {
4755            semaphoreNode->second.in_use.fetch_add(1);
4756        }
4757    }
4758    for (auto event : pCB->events) {
4759        auto eventNode = my_data->eventMap.find(event);
4760        if (eventNode == my_data->eventMap.end()) {
4761            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4763                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4764                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
4765        } else {
4766            eventNode->second.in_use.fetch_add(1);
4767        }
4768    }
4769    return skip_call;
4770}
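
// Lifetime sketch (illustrative): every increment above is balanced by one of
// the decrementResources() variants below once the submission retires:
//
//     validateAndIncrementResources(dev_data, pCB);  // at vkQueueSubmit()
//     // ... GPU executes the submission ...
//     decrementResources(dev_data, cmdBuffer);       // on the fence/queue-wait path
//
// so each in_use count reflects how many in-flight submissions still reference
// the object.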
4771
4772// Note: This function assumes that the global lock is held by the calling
4773// thread.
4774static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4775    bool skip_call = false;
4776    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4777    if (pCB) {
4778        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4779            for (auto event : queryEventsPair.second) {
4780                if (my_data->eventMap[event].needsSignaled) {
4781                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4782                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
4783                                         "Cannot get query results on queryPool %" PRIu64
4784                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
4785                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4786                }
4787            }
4788        }
4789    }
4790    return skip_call;
4791}
4792// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4793static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
    // Decrement the CB's in_use count; once nothing still references it, drop it from the global in-flight set
4795    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4796    pCB->in_use.fetch_sub(1);
4797    if (!pCB->in_use.load()) {
4798        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4799    }
4800}
4801
4802static void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4803    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4804    for (auto drawDataElement : pCB->drawData) {
4805        for (auto buffer : drawDataElement.buffers) {
4806            auto buffer_data = my_data->bufferMap.find(buffer);
4807            if (buffer_data != my_data->bufferMap.end()) {
4808                buffer_data->second.in_use.fetch_sub(1);
4809            }
4810        }
4811    }
4812    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4813        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4814            auto setNode = my_data->setMap.find(set);
4815            if (setNode != my_data->setMap.end()) {
4816                setNode->second->in_use.fetch_sub(1);
4817            }
4818        }
4819    }
4820    for (auto semaphore : pCB->semaphores) {
4821        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4822        if (semaphoreNode != my_data->semaphoreMap.end()) {
4823            semaphoreNode->second.in_use.fetch_sub(1);
4824        }
4825    }
4826    for (auto event : pCB->events) {
4827        auto eventNode = my_data->eventMap.find(event);
4828        if (eventNode != my_data->eventMap.end()) {
4829            eventNode->second.in_use.fetch_sub(1);
4830        }
4831    }
4832    for (auto queryStatePair : pCB->queryToStateMap) {
4833        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4834    }
4835    for (auto eventStagePair : pCB->eventToStageMap) {
4836        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4837    }
4838}
4839// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call
4840//  decrementResources for all priorFences and cmdBuffers associated with fence.
4841static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4842    bool skip_call = false;
4843    for (uint32_t i = 0; i < fenceCount; ++i) {
4844        auto fence_data = my_data->fenceMap.find(pFences[i]);
4845        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4846            return skip_call;
4847        fence_data->second.needsSignaled = false;
4848        fence_data->second.in_use.fetch_sub(1);
4849        decrementResources(my_data, static_cast<uint32_t>(fence_data->second.priorFences.size()),
4850                           fence_data->second.priorFences.data());
4851        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
4852            decrementResources(my_data, cmdBuffer);
4853            skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer);
4854            removeInFlightCmdBuffer(my_data, cmdBuffer);
4855        }
4856    }
4857    return skip_call;
4858}
4859// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
4860static bool decrementResources(layer_data *my_data, VkQueue queue) {
4861    bool skip_call = false;
4862    auto queue_data = my_data->queueMap.find(queue);
4863    if (queue_data != my_data->queueMap.end()) {
4864        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
4865            decrementResources(my_data, cmdBuffer);
4866            skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer);
4867            removeInFlightCmdBuffer(my_data, cmdBuffer);
4868        }
4869        queue_data->second.untrackedCmdBuffers.clear();
4870        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4871                                        queue_data->second.lastFences.data());
4872    }
4873    return skip_call;
4874}
4875
4876// This function merges command buffer tracking between queues when there is a semaphore dependency
4877// between them (see below for details as to how tracking works). When this happens, the prior
4878// fences from the signaling queue are merged into the wait queue as well as any untracked command
4879// buffers.
4880static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
4881    if (queue == other_queue) {
4882        return;
4883    }
4884    auto queue_data = dev_data->queueMap.find(queue);
4885    auto other_queue_data = dev_data->queueMap.find(other_queue);
4886    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
4887        return;
4888    }
4889    for (auto fenceInner : other_queue_data->second.lastFences) {
4890        queue_data->second.lastFences.push_back(fenceInner);
4891    }
4892    if (fence != VK_NULL_HANDLE) {
4893        auto fence_data = dev_data->fenceMap.find(fence);
4894        if (fence_data == dev_data->fenceMap.end()) {
4895            return;
4896        }
4897        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
4898            fence_data->second.cmdBuffers.push_back(cmdbuffer);
4899        }
4900        other_queue_data->second.untrackedCmdBuffers.clear();
4901    } else {
4902        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
4903            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
4904        }
4905        other_queue_data->second.untrackedCmdBuffers.clear();
4906    }
4907    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
4908        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
4909    }
4910}
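
// Example scenario for the merge above (illustrative): queue A submits CB1
// signaling semaphore S with no fence, so CB1 lands in A's untrackedCmdBuffers;
// queue B then submits a wait on S with fence F. The merge moves CB1 under F
// (or into B's untracked list when F is VK_NULL_HANDLE), so waiting on F also
// retires CB1.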
4911
4912// This is the core function for tracking command buffers. There are two primary ways command
4913// buffers are tracked. When submitted they are stored in the command buffer list associated
4914// with a fence or the untracked command buffer list associated with a queue if no fence is used.
4915// Each queue also stores the last fence that was submitted onto the queue. This allows us to
4916// create a linked list of fences and their associated command buffers so if one fence is
4917// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
4918// waited on (either via a queue, device or fence), we free the cmd buffers for that fence and
4919// recursively call with the prior fences.
4920static void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4921                                VkFence fence) {
4922    auto queue_data = my_data->queueMap.find(queue);
4923    if (fence != VK_NULL_HANDLE) {
4924        vector<VkFence> prior_fences;
4925        auto fence_data = my_data->fenceMap.find(fence);
4926        if (fence_data == my_data->fenceMap.end()) {
4927            return;
4928        }
4929        fence_data->second.cmdBuffers.clear();
4930        if (queue_data != my_data->queueMap.end()) {
4931            prior_fences = queue_data->second.lastFences;
4932            queue_data->second.lastFences.clear();
4933            queue_data->second.lastFences.push_back(fence);
4934            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
4935                fence_data->second.cmdBuffers.push_back(cmdbuffer);
4936            }
4937            queue_data->second.untrackedCmdBuffers.clear();
4938        }
4939        fence_data->second.priorFences = prior_fences;
4940        fence_data->second.needsSignaled = true;
4941        fence_data->second.queue = queue;
4942        fence_data->second.in_use.fetch_add(1);
4943        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4944            const VkSubmitInfo *submit = &pSubmits[submit_idx];
4945            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4946                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4947                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
4948                }
4949                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
4950            }
4951        }
4952    } else {
4953        if (queue_data != my_data->queueMap.end()) {
4954            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4955                const VkSubmitInfo *submit = &pSubmits[submit_idx];
4956                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4957                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4958                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
4959                    }
4960                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
4961                }
4962            }
4963        }
4964    }
4965}
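
// Illustrative timeline of the fence chain described above: submitting with
// fence F1 and then fence F2 on the same queue leaves F2.priorFences == {F1},
// so a wait on F2 retires F2's command buffers and then recurses into F1,
// mirroring the rule that fence signals on a single queue complete in
// submission order.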
4966
4967static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4968                                       VkFence fence) {
4969    auto queue_data = my_data->queueMap.find(queue);
4970    if (queue_data != my_data->queueMap.end()) {
4971        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4972            const VkSubmitInfo *submit = &pSubmits[submit_idx];
4973            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4974                // Add cmdBuffers to the global set and increment count
4975                GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]);
4976                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4977                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
4978                    GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer);
4979                    pSubCB->in_use.fetch_add(1);
4980                }
4981                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
4982                pCB->in_use.fetch_add(1);
4983            }
4984        }
4985    }
4986}
4987
4988static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4989    bool skip_call = false;
4990    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4991        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4992        skip_call |=
4993            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4994                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4995                    "Command Buffer %#" PRIx64 " is already in use and is not marked for simultaneous use.",
4996                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4997    }
4998    return skip_call;
4999}
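
// App-side remedy sketch (not part of this layer): to legally re-submit a
// command buffer that may still be executing, record it with the
// simultaneous-use flag:
//
//     VkCommandBufferBeginInfo begin_info = {};
//     begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
//     vkBeginCommandBuffer(cb, &begin_info);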
5000
5001static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5002    bool skipCall = false;
5003    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
5004    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5005        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5006                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted %" PRIu64 " times.",
                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5010    }
5011    // Validate that cmd buffers have been updated
5012    if (CB_RECORDED != pCB->state) {
5013        if (CB_INVALID == pCB->state) {
5014            // Inform app of reason CB invalid
5015            bool causeReported = false;
5016            if (!pCB->destroyedSets.empty()) {
5017                std::stringstream set_string;
5018                for (auto set : pCB->destroyedSets)
5019                    set_string << " " << set;
5020
5021                skipCall |=
5022                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5023                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5024                            "You are submitting command buffer %#" PRIxLEAST64
5025                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
5026                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5027                causeReported = true;
5028            }
5029            if (!pCB->updatedSets.empty()) {
5030                std::stringstream set_string;
5031                for (auto set : pCB->updatedSets)
5032                    set_string << " " << set;
5033
5034                skipCall |=
5035                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5036                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5037                            "You are submitting command buffer %#" PRIxLEAST64
5038                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
5039                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5040                causeReported = true;
5041            }
5042            if (!pCB->destroyedFramebuffers.empty()) {
5043                std::stringstream fb_string;
5044                for (auto fb : pCB->destroyedFramebuffers)
5045                    fb_string << " " << fb;
5046
5047                skipCall |=
5048                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5049                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5050                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
5051                            "referenced framebuffers destroyed: %s",
5052                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
5053                causeReported = true;
5054            }
5055            // TODO : This is defensive programming to make sure an error is
5056            //  flagged if we hit this INVALID cmd buffer case and none of the
5057            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
5059            if (!causeReported) {
5060                skipCall |= log_msg(
5061                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5062                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should be improved to report the exact cause.",
5066                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
5067            }
5068        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
5069            skipCall |=
5070                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5071                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
5072                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
5073                        (uint64_t)(pCB->commandBuffer));
5074        }
5075    }
5076    return skipCall;
5077}
5078
5079static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5080    // Track in-use for resources off of primary and any secondary CBs
5081    bool skipCall = validateAndIncrementResources(dev_data, pCB);
5082    if (!pCB->secondaryCommandBuffers.empty()) {
5083        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5084            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5085            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5086            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5088                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5089                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5090                        " but that buffer has subsequently been bound to "
5091                        "primary cmd buffer %#" PRIxLEAST64 ".",
5092                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5093                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5094            }
5095        }
5096    }
5097    skipCall |= validateCommandBufferState(dev_data, pCB);
5098    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5099    // on device
5100    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5101    return skipCall;
5102}
5103
5104VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5105vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5106    bool skipCall = false;
5107    GLOBAL_CB_NODE *pCBNode = NULL;
5108    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5109    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5110    std::unique_lock<std::mutex> lock(global_lock);
5111    // First verify that fence is not in use
5112    if (fence != VK_NULL_HANDLE) {
5113        dev_data->fenceMap[fence].queue = queue;
5114        if ((submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5115            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5116                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5117                                "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5118        }
5119        if (!dev_data->fenceMap[fence].needsSignaled) {
5120            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5121                                reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5122                                "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
5123                                reinterpret_cast<uint64_t &>(fence));
5124        }
5125    }
5126    // TODO : Review these old print functions and clean up as appropriate
5127    print_mem_list(dev_data);
5128    printCBList(dev_data);
5129    // Update cmdBuffer-related data structs and mark fence in-use
5130    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5131    // Now verify each individual submit
5132    std::unordered_set<VkQueue> processed_other_queues;
5133    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5134        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5135        vector<VkSemaphore> semaphoreList;
5136        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5137            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5138            semaphoreList.push_back(semaphore);
5139            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
5140                if (dev_data->semaphoreMap[semaphore].signaled) {
5141                    dev_data->semaphoreMap[semaphore].signaled = false;
5142                } else {
5143                    skipCall |=
5144                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5145                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5146                                "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5147                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5148                }
5149                const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5150                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5151                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5152                    processed_other_queues.insert(other_queue);
5153                }
5154            }
5155        }
5156        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5157            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5158            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
5159                semaphoreList.push_back(semaphore);
5160                if (dev_data->semaphoreMap[semaphore].signaled) {
5161                    skipCall |=
5162                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5163                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5164                                "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5165                                " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5166                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5167                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5168                } else {
5169                    dev_data->semaphoreMap[semaphore].signaled = true;
5170                    dev_data->semaphoreMap[semaphore].queue = queue;
5171                }
5172            }
5173        }
5174        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5175            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5176            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5177            if (pCBNode) {
5178                pCBNode->semaphores = semaphoreList;
5179                pCBNode->submitCount++; // increment submit count
5180                pCBNode->lastSubmittedFence = fence;
5181                pCBNode->lastSubmittedQueue = queue;
5182                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5183                // Call submit-time functions to validate/update state
5184                for (auto &function : pCBNode->validate_functions) {
5185                    skipCall |= function();
5186                }
5187                for (auto &function : pCBNode->eventUpdates) {
5188                    skipCall |= function(queue);
5189                }
5190            }
5191        }
5192    }
5193    markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence);
5194    lock.unlock();
5195    if (!skipCall)
5196        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5197
5198    return result;
5199}
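// A fence usage pattern that satisfies both fence checks above (sketch; 'fence'
// and 'submit_info' are app-side placeholders):
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // prior submit retired
//     vkResetFences(device, 1, &fence);                        // back to unsignaled
//     vkQueueSubmit(queue, 1, &submit_info, fence);            // safe to reuse the fence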
5200
5201#if MTMERGESOURCE
5202VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5203                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5204    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5205    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5206    // TODO : Track allocations and overall size here
    if (VK_SUCCESS == result) {
5207        std::lock_guard<std::mutex> lock(global_lock);
5208        add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5209        print_mem_list(my_data);
    }
5210    return result;
5211}
5212
5213VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5214vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5215    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5216
5217    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5218    // Before freeing a memory object, an application must ensure the memory object is no longer
5219    // in use by the device, for example by command buffers queued for execution. The memory need
5220    // not yet be unbound from all images and buffers, but any further use of those images or
5221    // buffers (on host or device) for anything other than destroying those objects will result in
5222    // undefined behavior.
5223
5224    std::unique_lock<std::mutex> lock(global_lock);
5225    freeMemObjInfo(my_data, device, mem, false);
5226    print_mem_list(my_data);
5227    printCBList(my_data);
5228    lock.unlock();
5229    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5230}
5231
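// vkMapMemory range validation: warns on zero-size maps, errors on double-maps
// (memRange.size != 0 doubles as the "currently mapped" flag, so
// storeMemRanges/deleteMemRanges must keep it in sync), and errors when
// offset/size run past allocationSize, handling VK_WHOLE_SIZE separately.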
5232static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5233    bool skipCall = false;
5234
5235    if (size == 0) {
5236        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5237        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5238                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5239                           "VkMapMemory: Attempting to map memory range of size zero");
5240    }
5241
5242    auto mem_element = my_data->memObjMap.find(mem);
5243    if (mem_element != my_data->memObjMap.end()) {
5244        // It is an application error to call VkMapMemory on an object that is already mapped
5245        if (mem_element->second.memRange.size != 0) {
5246            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5247                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5248                               "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5249        }
5250
5251        // Validate that offset + size is within object's allocationSize
5252        if (size == VK_WHOLE_SIZE) {
5253            if (offset >= mem_element->second.allocInfo.allocationSize) {
5254                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5255                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5256                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total memory size %" PRIu64, offset,
5257                                   mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
5258            }
5259        } else {
5260            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
5261                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5262                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5263                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total memory size %" PRIu64, offset,
5264                                   size + offset, mem_element->second.allocInfo.allocationSize);
5265            }
5266        }
5267    }
5268    return skipCall;
5269}
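// Worked example (hypothetical sizes): for a 1024-byte allocation, mapping
// offset = 512 with size = 1024 fails above since offset + size (1536) exceeds
// allocationSize (1024); offset = 512 with size = VK_WHOLE_SIZE is accepted.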
5270
5271static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5272    auto mem_element = my_data->memObjMap.find(mem);
5273    if (mem_element != my_data->memObjMap.end()) {
5274        MemRange new_range;
5275        new_range.offset = offset;
5276        new_range.size = size;
5277        mem_element->second.memRange = new_range;
5278    }
5279}
5280
5281static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5282    bool skipCall = false;
5283    auto mem_element = my_data->memObjMap.find(mem);
5284    if (mem_element != my_data->memObjMap.end()) {
5285        if (!mem_element->second.memRange.size) {
5286            // Valid Usage: memory must currently be mapped
5287            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5288                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5289                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5290        }
5291        mem_element->second.memRange.size = 0;
5292        if (mem_element->second.pData) {
5293            free(mem_element->second.pData);
5294            mem_element->second.pData = 0;
5295        }
5296    }
5297    return skipCall;
5298}
5299
5300static char NoncoherentMemoryFillValue = 0xb;
5301
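// For non-coherent memory the layer hands the app a pointer into a double-sized
// shadow allocation filled with NoncoherentMemoryFillValue, leaving guard bands
// of size/2 bytes on each side so writes that stray outside the mapped range
// can be detected later. Layout of the shadow allocation:
//
//     [ guard: size/2 ][ application data: size ][ guard: size/2 ]
//     ^ pData           ^ *ppData handed back to the application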
5302static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5303    auto mem_element = dev_data->memObjMap.find(mem);
5304    if (mem_element != dev_data->memObjMap.end()) {
5305        mem_element->second.pDriverData = *ppData;
5306        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5307        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5308            mem_element->second.pData = 0;
5309        } else {
5310            if (size == VK_WHOLE_SIZE) {
5311                size = mem_element->second.allocInfo.allocationSize;
5312            }
5313            size_t convSize = (size_t)(size);
5314            mem_element->second.pData = malloc(2 * convSize);
5315            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5316            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5317        }
5318    }
5319}
5320#endif
5321// Verify that state for fence being waited on is appropriate. That is,
5322//  a fence being waited on should not already be signaled and
5323//  it should have been submitted on a queue or during acquire next image
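//  Note: fences created with VK_FENCE_CREATE_SIGNALED_BIT carry firstTimeFlag,
//  so the first wait on such a fence is exempted from both checks below.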
5324static inline bool verifyWaitFenceState(VkDevice device, VkFence fence, const char *apiCall) {
5325    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5326    bool skipCall = false;
5327    auto pFenceInfo = my_data->fenceMap.find(fence);
5328    if (pFenceInfo != my_data->fenceMap.end()) {
5329        if (!pFenceInfo->second.firstTimeFlag) {
5330            if (!pFenceInfo->second.needsSignaled) {
5331                skipCall |=
5332                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5333                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5334                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5335            }
5336            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5337                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5338                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5339                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
5340                                    "acquire next image.",
5341                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5342            }
5343        } else {
5344            pFenceInfo->second.firstTimeFlag = false;
5345        }
5346    }
5347    return skipCall;
5348}
5349
5350VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5351vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5352    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5353    bool skip_call = false;
5354    // Verify fence status of submitted fences
5355    std::unique_lock<std::mutex> lock(global_lock);
5356    for (uint32_t i = 0; i < fenceCount; i++) {
5357        skip_call |= verifyWaitFenceState(device, pFences[i], "vkWaitForFences");
5358    }
5359    lock.unlock();
5360    if (skip_call)
5361        return VK_ERROR_VALIDATION_FAILED_EXT;
5362
5363    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5364
5365    if (result == VK_SUCCESS) {
5366        lock.lock();
5367        // When we know that all fences are complete we can clean/remove their CBs
5368        if (waitAll || fenceCount == 1) {
5369            skip_call |= decrementResources(dev_data, fenceCount, pFences);
5370        }
5371        // NOTE : Alternate case not handled here is when some fences have completed. In
5372        //  this case for app to guarantee which fences completed it will have to call
5373        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5374        lock.unlock();
5375    }
5376    if (skip_call)
5377        return VK_ERROR_VALIDATION_FAILED_EXT;
5378    return result;
5379}
5380
5381VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5382    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5383    bool skipCall = false;
5384    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5385    std::unique_lock<std::mutex> lock(global_lock);
5386    skipCall = verifyWaitFenceState(device, fence, "vkGetFenceStatus");
5387    lock.unlock();
5388
5389    if (skipCall)
5390        return result;
5391
5392    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5394    lock.lock();
5395    if (result == VK_SUCCESS) {
5396        skipCall |= decrementResources(dev_data, 1, &fence);
5397    }
5398    lock.unlock();
5399    if (skipCall)
5400        return VK_ERROR_VALIDATION_FAILED_EXT;
5401    return result;
5402}
5403
5404VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5405                                                            VkQueue *pQueue) {
5406    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5407    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5408    std::lock_guard<std::mutex> lock(global_lock);
5409
5410    // Add queue to tracking set only if it is new
5411    auto result = dev_data->queues.emplace(*pQueue);
5412    if (result.second) {
5413        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5414        pQNode->device = device;
5415    }
5416}
5417
5418VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
5419    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5420    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
5421    skip_call |= decrementResources(dev_data, queue);
    lock.unlock();
5422    if (skip_call)
5423        return VK_ERROR_VALIDATION_FAILED_EXT;
5424    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
5425    return result;
5426}
5427
5428VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5429    bool skip_call = false;
5430    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5431    std::unique_lock<std::mutex> lock(global_lock);
5432    for (auto queue : dev_data->queues) {
5433        skip_call |= decrementResources(dev_data, queue);
5434    }
5435    dev_data->globalInFlightCmdBuffers.clear();
5436    lock.unlock();
5437    if (skip_call)
5438        return VK_ERROR_VALIDATION_FAILED_EXT;
5439    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5440    return result;
5441}
5442
5443VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5444    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5445    bool skipCall = false;
5446    std::unique_lock<std::mutex> lock(global_lock);
5447    auto fence_pair = dev_data->fenceMap.find(fence);
5448    if (fence_pair != dev_data->fenceMap.end()) {
5449        if (fence_pair->second.in_use.load()) {
5450            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5451                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5452                                "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
5453        }
5454        dev_data->fenceMap.erase(fence_pair);
5455    }
5456    lock.unlock();
5457
5458    if (!skipCall)
5459        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5460}
5461
5462VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5463vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5464    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5465    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5466    std::lock_guard<std::mutex> lock(global_lock);
5467    auto item = dev_data->semaphoreMap.find(semaphore);
5468    if (item != dev_data->semaphoreMap.end()) {
5469        if (item->second.in_use.load()) {
5470            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5471                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5472                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5473        }
5474        dev_data->semaphoreMap.erase(semaphore);
5475    }
5476    // TODO : Clean up any internal data structures using this obj.
5477}
5478
5479VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5480    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5481    bool skip_call = false;
5482    std::unique_lock<std::mutex> lock(global_lock);
5483    auto event_data = dev_data->eventMap.find(event);
5484    if (event_data != dev_data->eventMap.end()) {
5485        if (event_data->second.in_use.load()) {
5486            skip_call |= log_msg(
5487                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
5488                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5489                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5490        }
5491        dev_data->eventMap.erase(event_data);
5492    }
5493    lock.unlock();
5494    if (!skip_call)
5495        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5496    // TODO : Clean up any internal data structures using this obj.
5497}
5498
5499VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5500vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5501    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5502        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5503    // TODO : Clean up any internal data structures using this obj.
5504}
5505
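// vkGetQueryPoolResults cross-checks each requested query against (a) whether
// it is still referenced by an in-flight command buffer and (b) its last
// recorded availability. Four outcomes are flagged: available-but-in-flight
// (allowed only when the reset was ordered by waited-on events),
// unavailable-and-in-flight without PARTIAL/WAIT flags, unavailable, and
// never-written queries.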
5506VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5507                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5508                                                     VkQueryResultFlags flags) {
5509    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5510    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5511    GLOBAL_CB_NODE *pCB = nullptr;
5512    std::unique_lock<std::mutex> lock(global_lock);
5513    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5514        pCB = getCBNode(dev_data, cmdBuffer);
5515        for (auto queryStatePair : pCB->queryToStateMap) {
5516            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5517        }
5518    }
5519    bool skip_call = false;
5520    for (uint32_t i = 0; i < queryCount; ++i) {
5521        QueryObject query = {queryPool, firstQuery + i};
5522        auto queryElement = queriesInFlight.find(query);
5523        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5524        if (queryToStateElement != dev_data->queryToStateMap.end()) {
5525            // Available and in flight
5526            if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5527                queryToStateElement->second) {
5528                for (auto cmdBuffer : queryElement->second) {
5529                    pCB = getCBNode(dev_data, cmdBuffer);
5530                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5531                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5532                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5533                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5534                                             "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5535                                             (uint64_t)(queryPool), firstQuery + i);
5536                    } else {
5537                        for (auto event : queryEventElement->second) {
5538                            dev_data->eventMap[event].needsSignaled = true;
5539                        }
5540                    }
5541                }
5542                // Unavailable and in flight
5543            } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5544                       !queryToStateElement->second) {
5545                // TODO : Can there be the same query in use by multiple command buffers in flight?
5546                bool make_available = false;
5547                for (auto cmdBuffer : queryElement->second) {
5548                    pCB = getCBNode(dev_data, cmdBuffer);
5549                    make_available |= pCB->queryToStateMap[query];
5550                }
5551                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5552                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5553                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5554                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5555                                         (uint64_t)(queryPool), firstQuery + i);
5556                }
5557                // Unavailable
5558            } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5559                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5560                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5561                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5562                                     (uint64_t)(queryPool), firstQuery + i);
5563                // Uninitialized
5564            } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5565                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5566                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5567                                     "Cannot get query results on queryPool %" PRIu64
5568                                     " with index %d as data has not been collected for this index.",
5569                                     (uint64_t)(queryPool), firstQuery + i);
5570            }
5571        }
5572    }
5573    lock.unlock();
5574    if (skip_call)
5575        return VK_ERROR_VALIDATION_FAILED_EXT;
5576    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5577                                                                flags);
5578}
5579
5580static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5581    bool skip_call = false;
5582    auto buffer_data = my_data->bufferMap.find(buffer);
5583    if (buffer_data == my_data->bufferMap.end()) {
5584        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5585                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5586                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5587    } else {
5588        if (buffer_data->second.in_use.load()) {
5589            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5590                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5591                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5592        }
5593    }
5594    return skip_call;
5595}
5596
5597VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5598vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5599    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5601    std::unique_lock<std::mutex> lock(global_lock);
5602    if (!validateIdleBuffer(dev_data, buffer)) {
5603        lock.unlock();
5604        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5605        lock.lock();
5606    }
5607    dev_data->bufferMap.erase(buffer);
5608}
5609
5610VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5611vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5612    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5613    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5614    std::lock_guard<std::mutex> lock(global_lock);
5615    auto item = dev_data->bufferViewMap.find(bufferView);
5616    if (item != dev_data->bufferViewMap.end()) {
5617        dev_data->bufferViewMap.erase(item);
5618    }
5619}
5620
5621VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5622    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5625    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5626
5627    std::lock_guard<std::mutex> lock(global_lock);
5628    const auto& entry = dev_data->imageMap.find(image);
5629    if (entry != dev_data->imageMap.end()) {
5630        // Clear any memory mapping for this image
5631        auto mem_entry = dev_data->memObjMap.find(entry->second.mem);
5632        if (mem_entry != dev_data->memObjMap.end())
5633            mem_entry->second.image = VK_NULL_HANDLE;
5634
5635        // Remove image from imageMap
5636        dev_data->imageMap.erase(entry);
5637    }
5638    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5639    if (subEntry != dev_data->imageSubresourceMap.end()) {
5640        for (const auto& pair : subEntry->second) {
5641            dev_data->imageLayoutMap.erase(pair);
5642        }
5643        dev_data->imageSubresourceMap.erase(subEntry);
5644    }
5645}
5646#if MTMERGESOURCE
5647static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5648                                     VkDebugReportObjectTypeEXT object_type) {
5649    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5650        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
5651                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
5652                       other_handle);
5653    } else {
5654        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
5655                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
5656                       other_handle);
5657    }
5658}
5659
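// The overlap test below rounds both range endpoints down to
// bufferImageGranularity before comparing (the masking assumes the limit is a
// power of two, as the spec requires), so a buffer and an image merely sharing
// a granularity "page" of the same VkDeviceMemory are reported as aliasing even
// if their byte ranges are disjoint.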
5660static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5661                                  VkDebugReportObjectTypeEXT object_type) {
5662    bool skip_call = false;
5663
5664    for (auto range : ranges) {
5665        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
5666            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5667            continue;
5668        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
5669            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5670            continue;
5671        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5672    }
5673    return skip_call;
5674}
5675
5676static bool validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5677                                           VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
5678                                           const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
5679    MEMORY_RANGE range;
5680    range.handle = handle;
5681    range.memory = mem;
5682    range.start = memoryOffset;
5683    range.end = memoryOffset + memRequirements.size - 1;
5684    ranges.push_back(range);
5685    return validate_memory_range(dev_data, other_ranges, range, object_type);
5686}
5687
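// vkBindBufferMemory validation: records the buffer-to-memory binding, checks
// the new range against image ranges already bound to the same VkDeviceMemory,
// then verifies memoryOffset against VkMemoryRequirements::alignment and
// against the min*OffsetAlignment device limits implied by the buffer's usage
// flags.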
5688VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5689vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5690    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5691    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5692    std::unique_lock<std::mutex> lock(global_lock);
5693    // Track objects tied to memory
5694    uint64_t buffer_handle = (uint64_t)(buffer);
5695    bool skipCall =
5696        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5697    auto buffer_node = dev_data->bufferMap.find(buffer);
5698    if (buffer_node != dev_data->bufferMap.end()) {
5699        buffer_node->second.mem = mem;
5700        VkMemoryRequirements memRequirements;
5701        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5702        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
5703                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
5704                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5705        // Validate memory requirements alignment
5706        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5707            skipCall |=
5708                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5709                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5710                        "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be an integer multiple of the "
5711                        "VkMemoryRequirements::alignment value %#" PRIxLEAST64
5712                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5713                        memoryOffset, memRequirements.alignment);
5714        }
5715        // Validate device limits alignments
5716        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].createInfo.usage;
5717        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
5718            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
5719                skipCall |=
5720                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5721                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5722                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
5723                            "device limit minTexelBufferOffsetAlignment %#" PRIxLEAST64,
5724                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
5725            }
5726        }
5727        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
5728            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
5729                0) {
5730                skipCall |=
5731                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5732                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
5733                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
5734                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
5735                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
5736            }
5737        }
5738        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
5739            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
5740                0) {
5741                skipCall |=
5742                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5743                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
5744                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
5745                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
5746                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
5747            }
5748        }
5749    }
5750    print_mem_list(dev_data);
5751    lock.unlock();
5752    if (!skipCall) {
5753        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5754    }
5755    return result;
5756}
5757
5758VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5759vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5760    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5761    // TODO : What to track here?
5762    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5763    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5764}
5765
5766VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5767vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5768    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5769    // TODO : What to track here?
5770    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5771    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5772}
5773#endif
5774VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5775vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5776    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5777        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5778    // TODO : Clean up any internal data structures using this obj.
5779}
5780
5781VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5782vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5783    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5784
5785    std::unique_lock<std::mutex> lock(global_lock);
5786    my_data->shaderModuleMap.erase(shaderModule);
5787    lock.unlock();
5788
5789    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5790}
5791
5792VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5793vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5794    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5795    // TODO : Clean up any internal data structures using this obj.
5796}
5797
5798VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5799vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5800    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5801        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5802    // TODO : Clean up any internal data structures using this obj.
5803}
5804
5805VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5806vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5807    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
5808    // TODO : Clean up any internal data structures using this obj.
5809}
5810
5811VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5812vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5813    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5814        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5815    // TODO : Clean up any internal data structures using this obj.
5816}
5817
5818VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5819vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5820    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5821        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
5822    // TODO : Clean up any internal data structures using this obj.
5823}
5824// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5825//  If this is a secondary command buffer, then make sure its primary is also in-flight
5826//  If primary is not in-flight, then remove secondary from global in-flight set
5827// This function is only valid at a point when cmdBuffer is being reset or freed
5828static bool checkAndClearCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5829    bool skip_call = false;
5830    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5831        // Primary CB or secondary where primary is also in-flight is an error
5832        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5833            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5834            skip_call |= log_msg(
5835                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5836                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5837                "Attempt to %s command buffer (%#" PRIxLEAST64 ") which is in use.", action,
5838                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5839        } else { // Secondary CB w/o primary in-flight, remove from in-flight
5840            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5841        }
5842    }
5843    return skip_call;
5844}
5845// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5846static bool checkAndClearCommandBuffersInFlight(layer_data *dev_data, const VkCommandPool commandPool, const char *action) {
5847    bool skip_call = false;
5848    auto pool_data = dev_data->commandPoolMap.find(commandPool);
5849    if (pool_data != dev_data->commandPoolMap.end()) {
5850        for (auto cmd_buffer : pool_data->second.commandBuffers) {
5851            if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5852                skip_call |= checkAndClearCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5853            }
5854        }
5855    }
5856    return skip_call;
5857}
5858
5859VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5860vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5861    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5862
5863    bool skip_call = false;
5864    std::unique_lock<std::mutex> lock(global_lock);
5865    for (uint32_t i = 0; i < commandBufferCount; i++) {
5866        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
5867        // Delete CB information structure, and remove from commandBufferMap
5868        if (cb_pair != dev_data->commandBufferMap.end()) {
            skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free");
5870            // reset prior to delete for data clean-up
5871            resetCB(dev_data, (*cb_pair).second->commandBuffer);
5872            delete (*cb_pair).second;
5873            dev_data->commandBufferMap.erase(cb_pair);
5874        }
5875
5876        // Remove commandBuffer reference from commandPoolMap
5877        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
5878    }
5879#if MTMERGESOURCE
5880    printCBList(dev_data);
5881#endif
5882    lock.unlock();
5883
5884    if (!skip_call)
5885        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5886}
5887
5888VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5889                                                                   const VkAllocationCallbacks *pAllocator,
5890                                                                   VkCommandPool *pCommandPool) {
5891    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5892
5893    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5894
5895    if (VK_SUCCESS == result) {
5896        std::lock_guard<std::mutex> lock(global_lock);
5897        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5898        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5899    }
5900    return result;
5901}
5902
5903VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5904                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5905
5906    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5907    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5908    if (result == VK_SUCCESS) {
5909        std::lock_guard<std::mutex> lock(global_lock);
5910        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
5911    }
5912    return result;
5913}
5914
5915// Destroy commandPool along with all of the commandBuffers allocated from that pool
5916VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5917vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5918    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5920    std::unique_lock<std::mutex> lock(global_lock);
5921    // Verify that command buffers in pool are complete (not in-flight)
5922    bool skipCall = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with");
5923    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
5924    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
5925        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
5926             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
5927            clear_cmd_buf_and_mem_references(dev_data, *poolCb);
5928            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
5929            delete (*del_cb).second;                  // delete CB info structure
5930            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
5931            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
5932                poolCb); // Remove CB reference from commandPoolMap's list
5933        }
5934    }
5935    dev_data->commandPoolMap.erase(commandPool);
5936
5937    lock.unlock();
5938
5942    if (!skipCall)
5943        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
5944}
5945
5946VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5947vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5948    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5949    bool skipCall = false;
5950    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5951
5952    if (checkAndClearCommandBuffersInFlight(dev_data, commandPool, "reset command pool with"))
5953        return VK_ERROR_VALIDATION_FAILED_EXT;
5954
5955    if (!skipCall)
5956        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
5957
5958    // Reset all of the CBs allocated from this pool
5959    if (VK_SUCCESS == result) {
5960        std::lock_guard<std::mutex> lock(global_lock);
5961        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
5962        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
5963            resetCB(dev_data, (*it));
5964            ++it;
5965        }
5966    }
5967    return result;
5968}
5969
5970VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5971    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5972    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5973    bool skipCall = false;
5974    std::unique_lock<std::mutex> lock(global_lock);
5975    for (uint32_t i = 0; i < fenceCount; ++i) {
5976        auto fence_item = dev_data->fenceMap.find(pFences[i]);
5977        if (fence_item != dev_data->fenceMap.end()) {
5978            fence_item->second.needsSignaled = true;
5979            if (fence_item->second.in_use.load()) {
5980                skipCall |=
5981                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5982                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5983                            "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
5984            }
5985        }
5986    }
5987    lock.unlock();
5988    if (!skipCall)
5989        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
5990    return result;
5991}
5992
5993VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5994vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5995    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5996    std::unique_lock<std::mutex> lock(global_lock);
5997    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
5998    if (fbNode != dev_data->frameBufferMap.end()) {
5999        for (auto cb : fbNode->second.referencingCmdBuffers) {
6000            auto cbNode = dev_data->commandBufferMap.find(cb);
6001            if (cbNode != dev_data->commandBufferMap.end()) {
6002                // Set CB as invalid and record destroyed framebuffer
6003                cbNode->second->state = CB_INVALID;
6004                cbNode->second->destroyedFramebuffers.insert(framebuffer);
6005            }
6006        }
6007        delete [] fbNode->second.createInfo.pAttachments;
6008        dev_data->frameBufferMap.erase(fbNode);
6009    }
6010    lock.unlock();
6011    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6012}
6013
6014VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6015vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6016    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6017    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6018    std::lock_guard<std::mutex> lock(global_lock);
6019    dev_data->renderPassMap.erase(renderPass);
6020}
6021
6022VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6023                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6024    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6025
6026    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6027
6028    if (VK_SUCCESS == result) {
6029        std::lock_guard<std::mutex> lock(global_lock);
6030        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6031        dev_data->bufferMap[*pBuffer].createInfo = *pCreateInfo;
6032        dev_data->bufferMap[*pBuffer].in_use.store(0);
6033    }
6034    return result;
6035}
6036
6037VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6038                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6039    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6040    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6041    if (VK_SUCCESS == result) {
6042        std::lock_guard<std::mutex> lock(global_lock);
6043        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
6044#if MTMERGESOURCE
6045        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6046        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6047        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
6048                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
6049                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6050#endif
6051    }
6052    return result;
6053}
6054
6055VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6056                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6057    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6058
6059    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6060
6061    if (VK_SUCCESS == result) {
6062        std::lock_guard<std::mutex> lock(global_lock);
6063        IMAGE_LAYOUT_NODE image_node;
6064        image_node.layout = pCreateInfo->initialLayout;
6065        image_node.format = pCreateInfo->format;
6066        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
6067        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6068        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6069        dev_data->imageLayoutMap[subpair] = image_node;
6070    }
6071    return result;
6072}
6073
6074static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6075    /* expects global_lock to be held by caller */
6076
6077    auto image_node_it = dev_data->imageMap.find(image);
6078    if (image_node_it != dev_data->imageMap.end()) {
6079        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6080         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6081         * the actual values.
6082         */
6083        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6084            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
6085        }
6086
6087        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6088            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
6089        }
6090    }
6091}
6092
6093// Return the correct layer/level counts if the caller used the special
6094// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
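// Example (hypothetical image): mipLevels = 10 with baseMipLevel = 2 and
// levelCount = VK_REMAINING_MIP_LEVELS resolves to *levels = 8.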
6095static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6096                                         VkImage image) {
6097    /* expects global_lock to be held by caller */
6098
6099    *levels = range.levelCount;
6100    *layers = range.layerCount;
6101    auto image_node_it = dev_data->imageMap.find(image);
6102    if (image_node_it != dev_data->imageMap.end()) {
6103        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6104            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
6105        }
6106        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6107            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
6108        }
6109    }
6110}
6111
6112VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6113                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6114    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6115    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6116    if (VK_SUCCESS == result) {
6117        std::lock_guard<std::mutex> lock(global_lock);
6118        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
6119        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
6120        dev_data->imageViewMap[*pView] = localCI;
6121#if MTMERGESOURCE
6122        // Validate that img has correct usage flags set
6123        validate_image_usage_flags(dev_data, pCreateInfo->image,
6124                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
6125                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6126                                   false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
6127#endif
6128    }
6129    return result;
6130}
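
// Illustrative sketch (not part of the layer, hypothetical): an image backing a view must
// carry at least one of the usage bits checked above, e.g.:
//
//     VkImageCreateInfo img_ci = {};
//     img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
//     img_ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;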

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.createInfo = *pCreateInfo;
        fence_node.needsSignaled = true;
        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            fence_node.firstTimeFlag = true;
            fence_node.needsSignaled = false;
        }
        fence_node.in_use.store(0);
    }
    return result;
}
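
// Illustrative sketch (hypothetical): a fence created pre-signaled skips the "needsSignaled"
// bookkeeping above, since the first wait on it succeeds immediately:
//
//     VkFenceCreateInfo fence_ci = {};
//     fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
//     fence_ci.flags = VK_FENCE_CREATE_SIGNALED_BIT; // fence_node.needsSignaled == false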

// TODO : Handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// Utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_NODE *pPipe) {
    // If any enabled attachment uses a constant-color/alpha blend factor, record at the
    // pipeline level that blend constants are needed
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
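
// Illustrative sketch (hypothetical): any blend factor in the enum range
// VK_BLEND_FACTOR_CONSTANT_COLOR .. VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA reads the
// blend-constants state, so an attachment state like this trips the detection above:
//
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable = VK_TRUE;
//     att.dstColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; // within the tested range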

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO : What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done:
    //  1. Pipeline create state is first shadowed into a PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags set up during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add the NODE to pipelineMap
    bool skipCall = false;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);

    for (i = 0; i < count; i++) {
        pPipeNode[i] = new PIPELINE_NODE;
        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (!skipCall) {
        lock.unlock();
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                         VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    bool skipCall = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO : Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO : Add Compute Pipeline Verification
        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (!skipCall) {
        lock.unlock();
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] = DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
    }
    return result;
}

static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                     const char *caller_name) {
    bool skipCall = false;
    if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u whose "
                                                                 "sum exceeds this device's maxPushConstantsSize of %u.",
                           caller_name, offset, size, dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize);
    }
    return skipCall;
}
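
// Illustrative sketch (hypothetical limit of 128 bytes): a push constant range with
// offset 64 and size 128 fails the check above because 64 + 128 = 192 > 128, while
// offset 0 and size 128 passes exactly at the limit.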

VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    uint32_t i = 0;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        i, pCreateInfo->pPushConstantRanges[i].size);
        }
        // TODO : Add warning if ranges overlap
    }
    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // TODOSC : Merge capture of the setLayouts per pipeline
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
        }
        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                       VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Insert this pool into the global descriptor pool map
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO : Need to do anything if pool create fails?
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    // Verify that requested descriptorSets are available in pool
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
    if (!pPoolNode) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                            (uint64_t)pAllocateInfo->descriptorPool);
    } else { // Make sure pool has all the available descriptors before calling down chain
        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
                                                             pAllocateInfo->pSetLayouts);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
        if (pPoolNode) {
            if (pAllocateInfo->descriptorSetCount == 0) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
                        "AllocateDescriptorSets called with 0 count");
            }
            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
                        (uint64_t)pDescriptorSets[i]);
                // Create new set node and add to head of pool nodes
                SET_NODE *pNewNode = new SET_NODE;
                if (NULL == pNewNode) {
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
                        lock.unlock();
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                } else {
                    // TODO : Pool should store a total count of each type of Descriptor available
                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. On reset/free, bump the count back up.
                    // Insert set at head of Set LL for this pool
                    pNewNode->pNext = pPoolNode->pSets;
                    pNewNode->in_use.store(0);
                    pPoolNode->pSets = pNewNode;
                    auto layout_pair = dev_data->descriptorSetLayoutMap.find(pAllocateInfo->pSetLayouts[i]);
                    if (layout_pair == dev_data->descriptorSetLayoutMap.end()) {
                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                                    "Unable to find set layout node for layout %#" PRIxLEAST64
                                    " specified in vkAllocateDescriptorSets() call",
                                    (uint64_t)pAllocateInfo->pSetLayouts[i])) {
                            lock.unlock();
                            return VK_ERROR_VALIDATION_FAILED_EXT;
                        }
                    } else {
                        // Only dereference the layout iterator when the lookup above succeeded
                        pNewNode->p_layout = &layout_pair->second;
                        pNewNode->descriptorCount = layout_pair->second.GetTotalDescriptorCount();
                        if (pNewNode->descriptorCount) {
                            pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
                        }
                    }
                    pNewNode->pool = pAllocateInfo->descriptorPool;
                    pNewNode->set = pDescriptorSets[i];
                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
                }
            }
        }
        lock.unlock();
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < count; ++i)
        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
        // Can't free from a NON_FREE pool
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if ((VK_SUCCESS == result) && (pPoolNode)) { // Guard against an untracked pool
        lock.lock();

        // Update available descriptor sets in pool
        pPoolNode->availableSets += count;

        // For each freed descriptor, add its count back into the pool as available
        for (uint32_t i = 0; i < count; ++i) {
            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
            invalidateBoundCmdBuffers(dev_data, pSet);
            auto p_layout = pSet->p_layout;
            uint32_t typeIndex = 0, poolSizeCount = 0;
            for (uint32_t j = 0; j < p_layout->GetBindingCount(); ++j) {
                auto layout_binding = p_layout->GetDescriptorSetLayoutBindingPtrFromIndex(j);
                typeIndex = static_cast<uint32_t>(layout_binding->descriptorType);
                poolSizeCount = layout_binding->descriptorCount;
                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
            }
        }
        lock.unlock();
    }
    // TODO : Any other clean-up or book-keeping to do here?
    return result;
}
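
// Illustrative sketch (not part of the layer, hypothetical): freeing individual sets is only
// legal when the pool opted in at creation, which is what the check above enforces:
//
//     VkDescriptorPoolCreateInfo pool_ci = {};
//     pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
//     pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;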

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // dsUpdate will return true only if a bailout error occurs, so we only call down the chain when update returns false
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
    lock.unlock();
    if (!rtn) {
        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                              pDescriptorCopies);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
        if (cp_it != dev_data->commandPoolMap.end()) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
#if MTMERGESOURCE
        printCBList(dev_data);
#endif
        lock.unlock();
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                        "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
                        "You must check CB fence before this call.",
                        commandBuffer);
        }
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
                            reinterpret_cast<void *>(commandBuffer));
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderPass should NOT be null for a Secondary CB
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must specify a valid renderPass parameter.",
                            reinterpret_cast<void *>(commandBuffer));
                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffer (%p) may perform better if a "
                                                  "valid framebuffer parameter is specified.",
                                            reinterpret_cast<void *>(commandBuffer));
                    } else {
                        string errorString = "";
                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
                        if (fbNode != dev_data->frameBufferMap.end()) {
                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
                                // renderPass that framebuffer was created with must be compatible with local renderPass
                                skipCall |=
                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
                            }
                            // Connect this framebuffer to this cmdBuffer
                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
                        }
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
                                        "support precise occlusion queries.",
                                        reinterpret_cast<void *>(commandBuffer));
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have a subpass index (%d) "
                                            "that is less than the number of subpasses (%d).",
                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
                    }
                }
            }
        }
        if (CB_RECORDING == pCB->state) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        (uint64_t)commandBuffer);
        } else if (CB_RECORDED == pCB->state) {
            VkCommandPool cmdPool = pCB->createInfo.commandPool;
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        pCB->state = CB_RECORDING;
        pCB->beginInfo = *pBeginInfo;
        if (pCB->beginInfo.pInheritanceInfo) {
            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
        }
    } else {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "In vkBeginCommandBuffer() but unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
    }
    lock.unlock();
    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}
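
// Illustrative sketch (not part of the layer, hypothetical handles): a secondary command
// buffer recorded with RENDER_PASS_CONTINUE_BIT must satisfy the checks above:
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = renderPass;   // must be compatible with the framebuffer's render pass
//     inherit.subpass = 0;               // must be < subpassCount
//     inherit.framebuffer = framebuffer; // optional here, but flagged above as a perf hint
//
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;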

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skipCall = false;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state != CB_RECORDING) {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
        }
        for (auto query : pCB->activeQueries) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUERY, "DS",
                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
                                (uint64_t)(query.pool), query.index);
        }
    }
    if (!skipCall) {
        lock.unlock();
        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
        lock.lock();
        if ((VK_SUCCESS == result) && (pCB)) { // Guard against an untracked command buffer
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
            printCB(dev_data, commandBuffer);
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) { // Guard against an untracked command buffer
        VkCommandPool cmdPool = pCB->createInfo.commandPool;
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                        "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                        (uint64_t)commandBuffer, (uint64_t)cmdPool);
        }
        skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset");
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}
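
// Illustrative sketch (not part of the layer, hypothetical): explicit resets require the
// pool-level opt-in checked above:
//
//     VkCommandPoolCreateInfo pool_ci = {};
//     pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;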

#if MTMERGESOURCE
// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
//    need to account for that mem now having binding to given commandBuffer
#endif
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
        }

        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
        if (pPN) {
            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
            set_cb_pso_status(pCB, pPN);
            set_pipeline_state(pPN);
            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
        } else {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        pCB->status |= CBSTATUS_VIEWPORT_SET;
        pCB->viewports.resize(viewportCount);
        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        pCB->status |= CBSTATUS_SCISSOR_SET;
        pCB->scissors.resize(scissorCount);
        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
                                 "vkCmdSetLineWidth() called but pipeline was created without the VK_DYNAMIC_STATE_LINE_WIDTH "
                                 "flag. This is undefined behavior and could be ignored.");
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
}
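
// Illustrative sketch (not part of the layer, hypothetical): vkCmdSetLineWidth() only has a
// defined effect when the bound pipeline opted into the dynamic state checked above:
//
//     VkDynamicState dyn_states[] = { VK_DYNAMIC_STATE_LINE_WIDTH };
//     VkPipelineDynamicStateCreateInfo dyn_ci = {};
//     dyn_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
//     dyn_ci.dynamicStateCount = 1;
//     dyn_ci.pDynamicStates = dyn_states;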
6850
6851VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6852vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6853    bool skipCall = false;
6854    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6855    std::unique_lock<std::mutex> lock(global_lock);
6856    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6857    if (pCB) {
6858        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6859        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6860    }
6861    lock.unlock();
6862    if (!skipCall)
6863        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6864                                                         depthBiasSlopeFactor);
6865}
6866
6867VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6868    bool skipCall = false;
6869    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6870    std::unique_lock<std::mutex> lock(global_lock);
6871    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6872    if (pCB) {
6873        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6874        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6875    }
6876    lock.unlock();
6877    if (!skipCall)
6878        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6879}
6880
6881VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6882vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6883    bool skipCall = false;
6884    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6885    std::unique_lock<std::mutex> lock(global_lock);
6886    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6887    if (pCB) {
6888        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6889        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6890    }
6891    lock.unlock();
6892    if (!skipCall)
6893        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6894}
6895
6896VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6897vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6898    bool skipCall = false;
6899    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6900    std::unique_lock<std::mutex> lock(global_lock);
6901    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6902    if (pCB) {
6903        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6904        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6905    }
6906    lock.unlock();
6907    if (!skipCall)
6908        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6909}
6910
6911VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6912vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6913    bool skipCall = false;
6914    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6915    std::unique_lock<std::mutex> lock(global_lock);
6916    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6917    if (pCB) {
6918        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6919        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6920    }
6921    lock.unlock();
6922    if (!skipCall)
6923        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6924}
6925
6926VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6927vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6928    bool skipCall = false;
6929    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6930    std::unique_lock<std::mutex> lock(global_lock);
6931    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6932    if (pCB) {
6933        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6934        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6935    }
6936    lock.unlock();
6937    if (!skipCall)
6938        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6939}
6940
6941VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6942vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6943                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6944                        const uint32_t *pDynamicOffsets) {
6945    bool skipCall = false;
6946    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6947    std::unique_lock<std::mutex> lock(global_lock);
6948    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6949    if (pCB) {
6950        if (pCB->state == CB_RECORDING) {
6951            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6952            uint32_t totalDynamicDescriptors = 0;
6953            string errorString = "";
6954            uint32_t lastSetIndex = firstSet + setCount - 1;
6955            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
6956                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6957            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6958            for (uint32_t i = 0; i < setCount; i++) {
6959                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6960                if (pSet) {
6961                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
6962                    pSet->boundCmdBuffers.insert(commandBuffer);
6963                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
6964                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
6965                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6966                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6967                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
6968                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6969                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
6970                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6971                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6972                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6973                                            "DS %#" PRIxLEAST64
6974                                            " bound but it was never updated. You may want to either update it or not bind it.",
6975                                            (uint64_t)pDescriptorSets[i]);
6976                    }
6977                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6978                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
6979                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6980                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6981                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6982                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
6983                                            "pipelineLayout due to: %s",
6984                                            i, errorString.c_str());
6985                    }
6986                    if (pSet->p_layout->GetDynamicDescriptorCount()) {
6987                        // First make sure we won't overstep bounds of pDynamicOffsets array
6988                        if ((totalDynamicDescriptors + pSet->p_layout->GetDynamicDescriptorCount()) > dynamicOffsetCount) {
6989                            skipCall |=
6990                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6991                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6992                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6993                                        "descriptorSet #%u (%#" PRIxLEAST64
6994                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
6995                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
6996                                        i, (uint64_t)pDescriptorSets[i], pSet->p_layout->GetDynamicDescriptorCount(),
6997                                        (dynamicOffsetCount - totalDynamicDescriptors));
6998                        } else { // Validate and store dynamic offsets with the set
6999                            // Validate Dynamic Offset Minimums
7000                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7001                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7002                                if (pSet->p_layout->GetTypeFromIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7003                                    if (vk_safe_modulo(
7004                                            pDynamicOffsets[cur_dyn_offset],
7005                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7006                                        skipCall |= log_msg(
7007                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7008                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7009                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7010                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7011                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7012                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7013                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
7014                                    }
7015                                    cur_dyn_offset++;
7016                                } else if (pSet->p_layout->GetTypeFromIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7017                                    if (vk_safe_modulo(
7018                                            pDynamicOffsets[cur_dyn_offset],
7019                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7020                                        skipCall |= log_msg(
7021                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7022                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7023                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7024                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7025                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7026                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7027                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
7028                                    }
7029                                    cur_dyn_offset++;
7030                                }
7031                            }
7032                            // Keep running total of dynamic descriptor count to verify at the end
7033                            totalDynamicDescriptors += pSet->p_layout->GetDynamicDescriptorCount();
7034                        }
7035                    }
7036                } else {
7037                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7038                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7039                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7040                                        (uint64_t)pDescriptorSets[i]);
7041                }
7042                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7043                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7044                if (firstSet > 0) { // Check set #s below the first bound set
7045                    for (uint32_t i = 0; i < firstSet; ++i) {
7046                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7047                            !verify_set_layout_compatibility(
7048                                dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7049                                errorString)) {
7050                            skipCall |= log_msg(
7051                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7052                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7053                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7054                                "DescriptorSet %#" PRIxLEAST64
7055                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7056                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7057                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7058                        }
7059                    }
7060                }
7061                // Check if newly last bound set invalidates any remaining bound sets
7062                if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() > (lastSetIndex + 1)) {
7063                    if (oldFinalBoundSet &&
7064                        !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7065                                                         errorString)) {
7066                        skipCall |=
7067                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7068                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7069                                    DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64
7070                                                          " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7071                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7072                                                          "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7073                                    (uint64_t)oldFinalBoundSet, lastSetIndex,
7074                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7075                                    lastSetIndex + 1, (uint64_t)layout);
7076                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7077                    }
7078                }
7079            }
7080            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7081            if (totalDynamicDescriptors != dynamicOffsetCount) {
7082                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7083                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7084                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7085                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7086                                    "is %u. It should exactly match the number of dynamic descriptors.",
7087                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7088            }
7089            // Save dynamicOffsets bound to this CB
7090            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7091                pCB->lastBound[pipelineBindPoint].dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7092            }
7093        } else {
7094            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7095        }
7096    }
7097    lock.unlock();
7098    if (!skipCall)
7099        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7100                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7101}
7102
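// Illustrative only (not part of the layer): a sketch of the call shape the checks above validate,
// using assumed handles cmd/pipeline_layout/set. One dynamic offset must be supplied per dynamic
// descriptor in the bound sets, each a multiple of the matching min*BufferOffsetAlignment limit:
//
//     const uint32_t dynamic_offsets[] = {256}; // set holds one UNIFORM_BUFFER_DYNAMIC descriptor
//     vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0 /*firstSet*/, 1 /*setCount*/, &set, 1, dynamic_offsets);
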
7103VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7104vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7105    bool skipCall = false;
7106    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7107    std::unique_lock<std::mutex> lock(global_lock);
7108#if MTMERGESOURCE
7109    VkDeviceMemory mem;
7110    skipCall =
7111        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7112    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7113    if (cb_data != dev_data->commandBufferMap.end()) {
7114        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7115        cb_data->second->validate_functions.push_back(function);
7116    }
7117    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7118#endif
7119    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7120    if (pCB) {
7121        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7122        VkDeviceSize offset_align = 0;
7123        switch (indexType) {
7124        case VK_INDEX_TYPE_UINT16:
7125            offset_align = 2;
7126            break;
7127        case VK_INDEX_TYPE_UINT32:
7128            offset_align = 4;
7129            break;
7130        default:
7131            // ParamChecker should catch a bad enum; the alignment check below also fires if offset_align stays 0
7132            break;
7133        }
7134        if (!offset_align || (offset % offset_align)) {
7135            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7136                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7137                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not meet the alignment required by index type %s.",
7138                                offset, string_VkIndexType(indexType));
7139        }
7140        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7141    }
7142    lock.unlock();
7143    if (!skipCall)
7144        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7145}
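
// Illustrative only: the alignment rule enforced above, using assumed handles cmd/index_buf.
// VK_INDEX_TYPE_UINT16 requires a 2-byte-aligned offset; VK_INDEX_TYPE_UINT32 a 4-byte-aligned one:
//
//     vkCmdBindIndexBuffer(cmd, index_buf, 128, VK_INDEX_TYPE_UINT32); // OK: multiple of 4
//     vkCmdBindIndexBuffer(cmd, index_buf, 3, VK_INDEX_TYPE_UINT16);   // flagged by the check above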
7146
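// Record the vertex buffers bound at [firstBinding, firstBinding + bindingCount) into the CB's
// current draw data, growing the tracking vector as needed.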
7147void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7148    uint32_t end = firstBinding + bindingCount;
7149    if (pCB->currentDrawData.buffers.size() < end) {
7150        pCB->currentDrawData.buffers.resize(end);
7151    }
7152    for (uint32_t i = 0; i < bindingCount; ++i) {
7153        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7154    }
7155}
7156
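// Snapshot the currently bound buffers so each recorded draw keeps its own resource list.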
7157static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7158
7159VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7160                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7161                                                                  const VkDeviceSize *pOffsets) {
7162    bool skipCall = false;
7163    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7164    std::unique_lock<std::mutex> lock(global_lock);
7165#if MTMERGESOURCE
7166    for (uint32_t i = 0; i < bindingCount; ++i) {
7167        VkDeviceMemory mem;
7168        skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7169        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7170        if (cb_data != dev_data->commandBufferMap.end()) {
7171            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7172            cb_data->second->validate_functions.push_back(function);
7173        }
7174    }
7175    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7176#endif
7177    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7178    if (pCB) {
7179        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7180        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7181    } else {
7182        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7183    }
7184    lock.unlock();
7185    if (!skipCall)
7186        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7187}
7188
7189/* expects global_lock to be held by caller */
7190static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7191    bool skip_call = false;
7192
7193    for (auto imageView : pCB->updateImages) {
7194        auto iv_data = dev_data->imageViewMap.find(imageView);
7195        if (iv_data == dev_data->imageViewMap.end())
7196            continue;
7197        VkImage image = iv_data->second.image;
7198        VkDeviceMemory mem;
7199        skip_call |=
7200            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7201        std::function<bool()> function = [=]() {
7202            set_memory_valid(dev_data, mem, true, image);
7203            return false;
7204        };
7205        pCB->validate_functions.push_back(function);
7206    }
7207    for (auto buffer : pCB->updateBuffers) {
7208        VkDeviceMemory mem;
7209        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
7210                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7211        std::function<bool()> function = [=]() {
7212            set_memory_valid(dev_data, mem, true);
7213            return false;
7214        };
7215        pCB->validate_functions.push_back(function);
7216    }
7217    return skip_call;
7218}
7219
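// Illustrative only: the validate_functions queued above are deferred checks. Elsewhere in this layer
// they are replayed when the command buffer is submitted, roughly in this shape (a sketch, not the
// actual submit-time code):
//
//     bool skip = false;
//     for (auto &validate : pCB->validate_functions)
//         skip |= validate(); // runs the validate_memory_is_valid / set_memory_valid closures
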
7220VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7221                                                     uint32_t firstVertex, uint32_t firstInstance) {
7222    bool skipCall = false;
7223    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7224    std::unique_lock<std::mutex> lock(global_lock);
7225    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7226    if (pCB) {
7227        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7228        pCB->drawCount[DRAW]++;
7229        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
7230        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7231        // TODO : Need to pass commandBuffer as srcObj here
7232        skipCall |=
7233            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7234                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7235        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7236        if (!skipCall) {
7237            updateResourceTrackingOnDraw(pCB);
7238        }
7239        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7240    }
7241    lock.unlock();
7242    if (!skipCall)
7243        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7244}
7245
7246VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7247                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7248                                                            uint32_t firstInstance) {
7249    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7250    bool skipCall = false;
7251    std::unique_lock<std::mutex> lock(global_lock);
7252    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7253    if (pCB) {
7254        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7255        pCB->drawCount[DRAW_INDEXED]++;
7256        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
7257        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7258        // TODO : Need to pass commandBuffer as srcObj here
7259        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7260                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7261                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7262        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7263        if (!skipCall) {
7264            updateResourceTrackingOnDraw(pCB);
7265        }
7266        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7267    }
7268    lock.unlock();
7269    if (!skipCall)
7270        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7271                                                        firstInstance);
7272}
7273
7274VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7275vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7276    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7277    bool skipCall = false;
7278    std::unique_lock<std::mutex> lock(global_lock);
7279#if MTMERGESOURCE
7280    VkDeviceMemory mem;
7281    // MTMTODO : merge with code below
7282    skipCall =
7283        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7284    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7285#endif
7286    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7287    if (pCB) {
7288        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7289        pCB->drawCount[DRAW_INDIRECT]++;
7290        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
7291        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7292        // TODO : Need to pass commandBuffer as srcObj here
7293        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7294                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7295                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7296        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7297        if (!skipCall) {
7298            updateResourceTrackingOnDraw(pCB);
7299        }
7300        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7301    }
7302    lock.unlock();
7303    if (!skipCall)
7304        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7305}
7306
7307VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7308vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7309    bool skipCall = false;
7310    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7311    std::unique_lock<std::mutex> lock(global_lock);
7312#if MTMERGESOURCE
7313    VkDeviceMemory mem;
7314    // MTMTODO : merge with code below
7315    skipCall =
7316        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7317    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7318#endif
7319    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7320    if (pCB) {
7321        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7322        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7323        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
7324        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7325        // TODO : Need to pass commandBuffer as srcObj here
7326        skipCall |=
7327            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7328                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7329                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7330        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7331        if (!skipCall) {
7332            updateResourceTrackingOnDraw(pCB);
7333        }
7334        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7335    }
7336    lock.unlock();
7337    if (!skipCall)
7338        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7339}
7340
7341VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7342    bool skipCall = false;
7343    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7344    std::unique_lock<std::mutex> lock(global_lock);
7345    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7346    if (pCB) {
7347        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
7348        // skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
7349        // TODO : Call below is temporary until call above can be re-enabled
7350        update_shader_storage_images_and_buffers(dev_data, pCB);
7351        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7352        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7353        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7354    }
7355    lock.unlock();
7356    if (!skipCall)
7357        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7358}
7359
7360VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7361vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7362    bool skipCall = false;
7363    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7364    std::unique_lock<std::mutex> lock(global_lock);
7365#if MTMERGESOURCE
7366    VkDeviceMemory mem;
7367    skipCall =
7368        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7369    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7370#endif
7371    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7372    if (pCB) {
7373        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
7374        // skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
7375        // TODO : Call below is temporary until call above can be re-enabled
7376        update_shader_storage_images_and_buffers(dev_data, pCB);
7377        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7378        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7379        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7380    }
7381    lock.unlock();
7382    if (!skipCall)
7383        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7384}
7385
7386VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7387                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
7388    bool skipCall = false;
7389    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7390    std::unique_lock<std::mutex> lock(global_lock);
7391#if MTMERGESOURCE
7392    VkDeviceMemory mem;
7393    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7394    skipCall =
7395        get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7396    if (cb_data != dev_data->commandBufferMap.end()) {
7397        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7398        cb_data->second->validate_functions.push_back(function);
7399    }
7400    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7401    skipCall |=
7402        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7403    if (cb_data != dev_data->commandBufferMap.end()) {
7404        std::function<bool()> function = [=]() {
7405            set_memory_valid(dev_data, mem, true);
7406            return false;
7407        };
7408        cb_data->second->validate_functions.push_back(function);
7409    }
7410    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7411    // Validate that SRC & DST buffers have correct usage flags set
7412    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7413                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7414    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7415                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7416#endif
7417    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7418    if (pCB) {
7419        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7420        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7421    }
7422    lock.unlock();
7423    if (!skipCall)
7424        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7425}
7426
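// Illustrative only: shape of a copy the usage-flag checks above apply to, with assumed handles
// cmd/src/dst; srcBuffer needs TRANSFER_SRC usage and dstBuffer needs TRANSFER_DST usage:
//
//     VkBufferCopy region = {};
//     region.srcOffset = 0;
//     region.dstOffset = 0;
//     region.size = 1024;
//     vkCmdCopyBuffer(cmd, src, dst, 1, &region);
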
7427static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7428                                    VkImageLayout srcImageLayout) {
7429    bool skip_call = false;
7430
7431    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7432    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7433    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7434        uint32_t layer = i + subLayers.baseArrayLayer;
7435        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7436        IMAGE_CMD_BUF_LAYOUT_NODE node;
7437        if (!FindLayout(pCB, srcImage, sub, node)) {
7438            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7439            continue;
7440        }
7441        if (node.layout != srcImageLayout) {
7442            // TODO: Improve log message in the next pass
7443            skip_call |=
7444                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7445                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7446                                                                        "when its current layout is %s.",
7447                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7448        }
7449    }
7450    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7451        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7452            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7453            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7454                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7455                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7456        } else {
7457            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7458                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7459                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7460                                 string_VkImageLayout(srcImageLayout));
7461        }
7462    }
7463    return skip_call;
7464}
7465
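// Illustrative only: an app avoids the layout errors above by transitioning the image before the
// copy. A minimal sketch of such a barrier, using assumed handles cmd/image:
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
//                          0, nullptr, 0, nullptr, 1, &barrier);
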
7466static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7467                                  VkImageLayout destImageLayout) {
7468    bool skip_call = false;
7469
7470    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7471    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7472    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7473        uint32_t layer = i + subLayers.baseArrayLayer;
7474        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7475        IMAGE_CMD_BUF_LAYOUT_NODE node;
7476        if (!FindLayout(pCB, destImage, sub, node)) {
7477            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7478            continue;
7479        }
7480        if (node.layout != destImageLayout) {
7481            skip_call |=
7482                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7483                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout is %s "
7484                                                                        "when its current layout is %s.",
7485                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7486        }
7487    }
7488    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7489        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7490            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7491            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7492                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7493                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7494        } else {
7495            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7496                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7497                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7498                                 string_VkImageLayout(destImageLayout));
7499        }
7500    }
7501    return skip_call;
7502}
7503
7504VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7505vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7506               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7507    bool skipCall = false;
7508    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7509    std::unique_lock<std::mutex> lock(global_lock);
7510#if MTMERGESOURCE
7511    VkDeviceMemory mem;
7512    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7513    // Validate that src & dst images have correct usage flags set
7514    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7515    if (cb_data != dev_data->commandBufferMap.end()) {
7516        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
7517        cb_data->second->validate_functions.push_back(function);
7518    }
7519    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7520    skipCall |=
7521        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7522    if (cb_data != dev_data->commandBufferMap.end()) {
7523        std::function<bool()> function = [=]() {
7524            set_memory_valid(dev_data, mem, true, dstImage);
7525            return false;
7526        };
7527        cb_data->second->validate_functions.push_back(function);
7528    }
7529    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7530    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7531                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7532    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7533                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7534#endif
7535    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7536    if (pCB) {
7537        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
7538        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
7539        for (uint32_t i = 0; i < regionCount; ++i) {
7540            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7541            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7542        }
7543    }
7544    lock.unlock();
7545    if (!skipCall)
7546        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7547                                                      regionCount, pRegions);
7548}
7549
7550VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7551vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7552               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7553    bool skipCall = false;
7554    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7555    std::unique_lock<std::mutex> lock(global_lock);
7556#if MTMERGESOURCE
7557    VkDeviceMemory mem;
7558    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7559    // Validate that src & dst images have correct usage flags set
7560    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7561    if (cb_data != dev_data->commandBufferMap.end()) {
7562        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
7563        cb_data->second->validate_functions.push_back(function);
7564    }
7565    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7566    skipCall |=
7567        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7568    if (cb_data != dev_data->commandBufferMap.end()) {
7569        std::function<bool()> function = [=]() {
7570            set_memory_valid(dev_data, mem, true, dstImage);
7571            return false;
7572        };
7573        cb_data->second->validate_functions.push_back(function);
7574    }
7575    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7576    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7577                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7578    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7579                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7580#endif
7581    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7582    if (pCB) {
7583        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
7584        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
7585    }
7586    lock.unlock();
7587    if (!skipCall)
7588        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7589                                                      regionCount, pRegions, filter);
7590}
7591
7592VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7593                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
7594                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7595    bool skipCall = false;
7596    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7597    std::unique_lock<std::mutex> lock(global_lock);
7598#if MTMERGESOURCE
7599    VkDeviceMemory mem;
7600    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7601    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7602    if (cb_data != dev_data->commandBufferMap.end()) {
7603        std::function<bool()> function = [=]() {
7604            set_memory_valid(dev_data, mem, true, dstImage);
7605            return false;
7606        };
7607        cb_data->second->validate_functions.push_back(function);
7608    }
7609    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
7610    skipCall |=
7611        get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7612    if (cb_data != dev_data->commandBufferMap.end()) {
7613        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
7614        cb_data->second->validate_functions.push_back(function);
7615    }
7616    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
7617    // Validate that src buff & dst image have correct usage flags set
7618    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7619                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7620    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7621                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7622#endif
7623    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7624    if (pCB) {
7625        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7626        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
7627        for (uint32_t i = 0; i < regionCount; ++i) {
7628            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7629        }
7630    }
7631    lock.unlock();
7632    if (!skipCall)
7633        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7634                                                              pRegions);
7635}
7636
7637VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7638                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7639                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7640    bool skipCall = false;
7641    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7642    std::unique_lock<std::mutex> lock(global_lock);
7643#if MTMERGESOURCE
7644    VkDeviceMemory mem;
7645    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7646    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7647    if (cb_data != dev_data->commandBufferMap.end()) {
7648        std::function<bool()> function = [=]() {
7649            return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage);
7650        };
7651        cb_data->second->validate_functions.push_back(function);
7652    }
7653    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
7654    skipCall |=
7655        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7656    if (cb_data != dev_data->commandBufferMap.end()) {
7657        std::function<bool()> function = [=]() {
7658            set_memory_valid(dev_data, mem, true);
7659            return false;
7660        };
7661        cb_data->second->validate_functions.push_back(function);
7662    }
7663    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
7664    // Validate that dst buff & src image have correct usage flags set
7665    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7666                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7667    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7668                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7669#endif
7670    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7671    if (pCB) {
7672        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7673        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
7674        for (uint32_t i = 0; i < regionCount; ++i) {
7675            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7676        }
7677    }
7678    lock.unlock();
7679    if (!skipCall)
7680        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7681                                                              pRegions);
7682}
7683
7684VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7685                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7686    bool skipCall = false;
7687    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7688    std::unique_lock<std::mutex> lock(global_lock);
7689#if MTMERGESOURCE
7690    VkDeviceMemory mem;
7691    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7692    skipCall =
7693        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7694    if (cb_data != dev_data->commandBufferMap.end()) {
7695        std::function<bool()> function = [=]() {
7696            set_memory_valid(dev_data, mem, true);
7697            return false;
7698        };
7699        cb_data->second->validate_functions.push_back(function);
7700    }
7701    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7702    // Validate that dst buff has correct usage flags set
7703    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7704                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7705#endif
7706    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7707    if (pCB) {
7708        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7709        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
7710    }
7711    lock.unlock();
7712    if (!skipCall)
7713        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7714}
7715
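// Illustrative only: vkCmdUpdateBuffer is meant for small inline updates. The spec additionally
// requires dstOffset and dataSize to be multiples of 4 and dataSize to be at most 65536 bytes
// (not checked here). Assumed handles cmd/dst:
//
//     const uint32_t payload[4] = {0, 1, 2, 3};
//     vkCmdUpdateBuffer(cmd, dst, 0 /*dstOffset*/, sizeof(payload), payload);
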
7716VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7717vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7718    bool skipCall = false;
7719    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7720    std::unique_lock<std::mutex> lock(global_lock);
7721#if MTMERGESOURCE
7722    VkDeviceMemory mem;
7723    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7724    skipCall =
7725        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7726    if (cb_data != dev_data->commandBufferMap.end()) {
7727        std::function<bool()> function = [=]() {
7728            set_memory_valid(dev_data, mem, true);
7729            return false;
7730        };
7731        cb_data->second->validate_functions.push_back(function);
7732    }
7733    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7734    // Validate that dst buff has correct usage flags set
7735    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7736                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7737#endif
7738    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7739    if (pCB) {
7740        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
7741        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
7742    }
7743    lock.unlock();
7744    if (!skipCall)
7745        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7746}
7747
7748VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7749                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
7750                                                                 const VkClearRect *pRects) {
7751    bool skipCall = false;
7752    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7753    std::unique_lock<std::mutex> lock(global_lock);
7754    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7755    if (pCB) {
7756        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7757        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
7758        if (!hasDrawCmd(pCB) && rectCount && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
7759            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7760            // TODO : commandBuffer should be srcObj
7761            // There are times when an app needs to use ClearAttachments (generally when reusing a buffer inside of a
7762            // render pass). Can we make this warning more specific? I'd like to avoid triggering this check when we
7763            // can tell it's a use that must call CmdClearAttachments; otherwise this seems more like a performance
7764            // warning.
7765            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7766                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7767                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7768                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7769                                (uint64_t)(commandBuffer));
7770        }
7771        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7772    }
7773
7774    // Validate that attachment is in reference list of active subpass
7775    if (pCB && pCB->activeRenderPass) {
7776        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
7777        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7778
7779        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7780            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7781            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7782                bool found = false;
7783                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7784                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7785                        found = true;
7786                        break;
7787                    }
7788                }
7789                if (!found) {
7790                    skipCall |= log_msg(
7791                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7792                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7793                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7794                        attachment->colorAttachment, pCB->activeSubpass);
7795                }
7796            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
7797                if (!pSD->pDepthStencilAttachment || // No depth/stencil attachment in active subpass
7798                    (pSD->pDepthStencilAttachment->attachment ==
7799                     VK_ATTACHMENT_UNUSED)) { // Depth/stencil attachment reference marked unused
7800
7801                    skipCall |= log_msg(
7802                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7803                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7804                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
7805                        "in active subpass %d",
7806                        attachment->colorAttachment,
7807                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7808                        pCB->activeSubpass);
7809                }
7810            }
7811        }
7812    }
7813    lock.unlock();
7814    if (!skipCall)
7815        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7816}
7817
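// Illustrative only: the alternative suggested by the warning above is to clear via the render pass
// itself. A sketch of the relevant VkAttachmentDescription fields (format is an assumption):
//
//     VkAttachmentDescription color = {};
//     color.format = VK_FORMAT_B8G8R8A8_UNORM;
//     color.samples = VK_SAMPLE_COUNT_1_BIT;
//     color.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;  // clear at renderpass begin instead
//     color.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
//     color.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     color.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
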
7818VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7819                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
7820                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7821    bool skipCall = false;
7822    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7823    std::unique_lock<std::mutex> lock(global_lock);
7824#if MTMERGESOURCE
7825    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7826    VkDeviceMemory mem;
7827    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7828    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7829    if (cb_data != dev_data->commandBufferMap.end()) {
7830        std::function<bool()> function = [=]() {
7831            set_memory_valid(dev_data, mem, true, image);
7832            return false;
7833        };
7834        cb_data->second->validate_functions.push_back(function);
7835    }
7836    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7837#endif
7838    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7839    if (pCB) {
7840        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7841        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
7842    }
7843    lock.unlock();
7844    if (!skipCall)
7845        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7846}
7847
7848VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7849vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7850                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7851                            const VkImageSubresourceRange *pRanges) {
7852    bool skipCall = false;
7853    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7854    std::unique_lock<std::mutex> lock(global_lock);
7855#if MTMERGESOURCE
7856    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7857    VkDeviceMemory mem;
7858    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7859    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7860    if (cb_data != dev_data->commandBufferMap.end()) {
7861        std::function<bool()> function = [=]() {
7862            set_memory_valid(dev_data, mem, true, image);
7863            return false;
7864        };
7865        cb_data->second->validate_functions.push_back(function);
7866    }
7867    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7868#endif
7869    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7870    if (pCB) {
7871        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7872        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
7873    }
7874    lock.unlock();
7875    if (!skipCall)
7876        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7877                                                                   pRanges);
7878}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    VkDeviceMemory mem;
    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}
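
// Illustrative sketch (not layer code): setEventStageMask() runs at queue-submit time, not at
// record time. vkCmdSetEvent()/vkCmdResetEvent() below capture the handles with std::bind,
//     std::function<bool(VkQueue)> f = std::bind(setEventStageMask, std::placeholders::_1, cb, event, mask);
// and push the result onto pCB->eventUpdates, so each callback is invoked with the actual
// VkQueue once the command buffer is submitted and the queue's eventToStageMap can be updated.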

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}

static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                                   const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    bool skip = false;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where
        // possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub,
                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
                                                                                    "when current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}
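
// Illustrative sketch (not layer code): layout tracking above is per (aspect, mip, layer)
// subresource. A barrier covering mips 0..1 of layer 0 conceptually records two entries:
//     SetLayout(pCB, image, {aspect, /*mip*/ 0, /*layer*/ 0}, newLayout);
//     SetLayout(pCB, image, {aspect, /*mip*/ 1, /*layer*/ 0}, newLayout);
// so any later barrier whose oldLayout disagrees with one recorded subresource layout trips
// the DRAWSTATE_INVALID_IMAGE_LAYOUT error above.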

// Print readable FlagBits in FlagMask
static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        // Use an unsigned index and shift so testing bit 31 is well-defined
        for (uint32_t i = 0; i < 32; i++) {
            if (accessMask & (1u << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}
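
// Example: string_VkAccessFlags(VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT)
// returns "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]" (bits reported in
// ascending bit order), and string_VkAccessFlags(0) returns "[None]".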

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
                             const char *type) {
    bool skip_call = false;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        // Warn on any bits set beyond the required and optional ones (bitwise NOT, not logical NOT)
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
                                                                  "%s when layout is %s, unless the app has previously added a "
                                                                  "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
                                                                  "layout is %s, unless the app has previously added a barrier for "
                                                                  "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}
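
// Illustrative sketch (not layer code): for a transition to TRANSFER_DST_OPTIMAL the caller below
// effectively invokes
//     ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, "Dest");
// so accessMask == VK_ACCESS_TRANSFER_WRITE_BIT passes silently, while a mask missing that bit
// produces the "must have required access bit" warning above.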

static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                        const VkImageLayout &layout, const char *type) {
    bool skip_call = false;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section of the spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}
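
// The switch above encodes the expected accessMask per layout: e.g. COLOR_ATTACHMENT_OPTIMAL
// requires COLOR_ATTACHMENT_WRITE (with COLOR_ATTACHMENT_READ optional), UNDEFINED expects an
// empty accessMask, and GENERAL is deliberately left unconstrained.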

static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = dev_data->imageMap.find(mem_barrier->image);
        if (image_data != dev_data->imageMap.end()) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than or equal to the number of "
                                         "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
                                                             "PREINITIALIZED.",
                            funcName);
            }
            auto image_data = dev_data->imageMap.find(mem_barrier->image);
            VkFormat format = VK_FORMAT_UNDEFINED;
            uint32_t arrayLayers = 0, mipLevels = 0;
            bool imageFound = false;
            if (image_data != dev_data->imageMap.end()) {
                format = image_data->second.createInfo.format;
                arrayLayers = image_data->second.createInfo.arrayLayers;
                mipLevels = image_data->second.createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
                        format = swapchain_data->second->createInfo.imageFormat;
                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                if (vk_format_is_depth_and_stencil(format) &&
                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
                                                                 "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
                                                                 "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                funcName);
                }
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
                                                                 "baseArrayLayer (%d) and layerCount (%d) be less "
                                                                 "than or equal to the total number of layers (%d).",
                                funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
                                arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
                                                                 "(%d) and levelCount (%d) be less than or equal to "
                                                                 "the total number of levels (%d).",
                                funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
                                mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has a QueueFamilyIndex greater than or equal to "
                                 "the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                 dev_data->phys_dev_properties.queue_family_properties.size());
        }

        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
        if (buffer_data != dev_data->bufferMap.end()) {
            VkDeviceSize buffer_size = (buffer_data->second.createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
                                           ? buffer_data->second.createInfo.size
                                           : 0;
            if (mem_barrier->offset >= buffer_size) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS",
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " which is not less than total size %" PRIu64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
                                                     " whose sum is greater than total size %" PRIu64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
                    reinterpret_cast<const uint64_t &>(buffer_size));
            }
        }
    }
    return skip_call;
}
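
// Illustrative sketch (not layer code): a buffer barrier that satisfies the checks above keeps
// both queue family indices at VK_QUEUE_FAMILY_IGNORED and its offset/size inside the buffer.
// Assuming a 256-byte buffer 'buf' written by a transfer and then read in a shader:
//     VkBufferMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.buffer = buf;
//     barrier.offset = 0;
//     barrier.size = 256; // offset + size must not exceed the buffer's createInfo.size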

bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = dev_data->eventMap.find(event);
            if (global_event_data == dev_data->eventMap.end()) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->second.stageMask;
            }
        }
    }
    if (sourceStageMask != stageMask) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_EVENT, "DS",
                    "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
                    sourceStageMask);
    }
    return skip_call;
}
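
// Illustrative sketch (not layer code): the check above requires vkCmdWaitEvents' srcStageMask to
// equal the OR of the stage masks the waited events were set with. Assuming two events e0 and e1
// (hypothetical handles):
//     vkCmdSetEvent(cb, e0, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdSetEvent(cb, e1, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
//     VkEvent events[] = {e0, e1};
//     vkCmdWaitEvents(cb, 2, events, VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, NULL, 0, NULL, 0, NULL);
// Passing only VK_PIPELINE_STAGE_TRANSFER_BIT as srcStageMask would trigger the error above.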

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        auto firstEventIndex = pCB->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            pCB->waitedEvents.push_back(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
                        (uint64_t)(queryPool), slot);
        } else {
            pCB->activeQueries.erase(query);
        }
        pCB->queryToStateMap[query] = 1;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            pCB->queryToStateMap[query] = 0;
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall |=
        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
    // Validate that DST buffer has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            if (!pCB->queryToStateMap[query]) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
                                    (uint64_t)(queryPool), firstQuery + i);
            }
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                                                                 dstOffset, stride, flags);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                                              const void *pValues) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) {
        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
    }
    // TODO : Add warning if push constant update doesn't align with range
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->queryToStateMap[query] = 1;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    if (VK_SUCCESS == result) {
        // Shadow create info and store in map
        std::lock_guard<std::mutex> lock(global_lock);

        auto &fbNode = dev_data->frameBufferMap[*pFramebuffer];
        fbNode.createInfo = *pCreateInfo;
        if (pCreateInfo->pAttachments) {
            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
            memcpy(attachments, pCreateInfo->pAttachments, pCreateInfo->attachmentCount * sizeof(VkImageView));
            fbNode.createInfo.pAttachments = attachments;
        }
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkImageView view = pCreateInfo->pAttachments[i];
            auto view_data = dev_data->imageViewMap.find(view);
            if (view_data == dev_data->imageViewMap.end()) {
                continue;
            }
            MT_FB_ATTACHMENT_INFO fb_info;
            get_mem_binding_from_object(dev_data, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        &fb_info.mem);
            fb_info.image = view_data->second.image;
            fbNode.attachments.push_back(fb_info);
        }
    }
    return result;
}
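
// Note: the shadow copy above deep-copies pCreateInfo->pAttachments so the layer can still walk a
// framebuffer's attachments after the app's create-info memory is gone, e.g. (sketch, fbHandle is
// a hypothetical previously created framebuffer):
//     auto &fb = dev_data->frameBufferMap[fbHandle];
//     for (uint32_t i = 0; i < fb.createInfo.attachmentCount; ++i) {
//         VkImageView v = fb.createInfo.pAttachments[i]; // valid: points at the layer's own copy
//     }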

static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (processed_nodes.count(index))
        return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true; otherwise recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return true;
        }
    } else {
        return true;
    }
    return false;
}
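
// Illustrative sketch (not layer code): FindDependency() searches the subpass DAG backwards.
// With dependencies 0 -> 1 -> 2 (so subpass_to_node[2].prev == {1} and [1].prev == {0}),
// FindDependency(2, 0, subpass_to_node, processed) returns true via the transitive path,
// while asking about an unconnected subpass exhausts the prev chains and returns false;
// processed_nodes keeps the recursion from revisiting shared ancestors.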

static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit (transitive) one still might. If so, warn; if not, report an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
                                     subpass, dependent_subpasses[k]);
            } else {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Two ranges [offset, offset + size) overlap iff each one begins before the other ends
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
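
// Example: mip range [2, 5) overlaps [4, 6) since 2 < 6 and 4 < 5, so two views of the same image
// whose mip and layer ranges both intersect are treated as aliasing attachments below.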

static bool ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
                                 const std::vector<DAGNode> &subpass_to_node) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_data_i = my_data->imageViewMap.find(viewi);
            auto view_data_j = my_data->imageViewMap.find(viewj);
            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
                continue;
            }
            if (view_data_i->second.image == view_data_j->second.image &&
                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
                continue;
            }
            if (image_data_i->second.mem == image_data_j->second.mem &&
                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
                                   image_data_j->second.memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            other_attachment, attachment);
            }
        }
    }
8747    // Find for each attachment the subpasses that use them.
8748    unordered_set<uint32_t> attachmentIndices;
8749    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8750        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8751        attachmentIndices.clear();
8752        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8753            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8754            input_attachment_to_subpass[attachment].push_back(i);
8755            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8756                input_attachment_to_subpass[overlapping_attachment].push_back(i);
8757            }
8758        }
8759        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8760            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8761            output_attachment_to_subpass[attachment].push_back(i);
8762            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8763                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8764            }
8765            attachmentIndices.insert(attachment);
8766        }
8767        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8768            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8769            output_attachment_to_subpass[attachment].push_back(i);
8770            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8771                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8772            }
8773
8774            if (attachmentIndices.count(attachment)) {
8775                skip_call |=
8776                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8777                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8778                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
8779                            attachment, i);
8780            }
8781        }
8782    }
8783    // If there is a dependency needed make sure one exists
8784    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8785        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8786        // If the attachment is an input then all subpasses that output must have a dependency relationship
8787        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8788            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
8789            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8790        }
8791        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8792        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8793            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
8794            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8795            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8796        }
8797        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8798            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8799            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8800            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8801        }
8802    }
8803    // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
8804    // written.
8805    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8806        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8807        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8808            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
8809        }
8810    }
8811    return skip_call;
8812}
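
/* A minimal sketch (not compiled into the layer) of the kind of explicit
 * VkSubpassDependency the checks above look for when subpass 1 reads, as an
 * input attachment, what subpass 0 wrote as a color attachment. The stage and
 * access masks shown are one reasonable choice, not the only valid one.
 *
 *     VkSubpassDependency dep = {};
 *     dep.srcSubpass = 0;                                                // producer
 *     dep.dstSubpass = 1;                                                // consumer
 *     dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
 *     dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 *     dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
 *     dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
 *     dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
 */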
8813
8814static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
8815    bool skip = false;
8816
8817    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8818        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8819        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8820            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
8821                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
8822                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8823                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not give optimal performance
8824                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8825                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8826                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
8827                } else {
8828                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8829                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8830                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
8831                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
8832                }
8833            }
8834        }
8835        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8836            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
8837                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8838                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not give optimal performance
8839                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8840                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8841                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
8842                } else {
8843                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8844                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8845                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
8846                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
8847                }
8848            }
8849        }
8850        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8851            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
8852                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
8853                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not give optimal performance
8854                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8855                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8856                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
8857                } else {
8858                    skip |=
8859                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8860                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8861                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
8862                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
8863                }
8864            }
8865        }
8866    }
8867    return skip;
8868}
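
/* Illustration (not compiled into the layer): attachment references using the
 * layouts ValidateLayouts() accepts without complaint; GENERAL only earns a
 * performance warning, anything else is an error.
 *
 *     VkAttachmentReference color = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
 *     VkAttachmentReference depth = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
 *     VkAttachmentReference input = {2, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
 */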
8869
8870static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8871                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
8872    bool skip_call = false;
8873    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8874        DAGNode &subpass_node = subpass_to_node[i];
8875        subpass_node.pass = i;
8876    }
8877    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8878        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8879        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
8880            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8881            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8882                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
8883                                 "Depedency graph must be specified such that an earlier pass cannot depend on a later pass.");
8884        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8885            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8886                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
8887        } else if (dependency.srcSubpass == dependency.dstSubpass) {
8888            has_self_dependency[dependency.srcSubpass] = true;
8889        }
8890        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8891            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8892        }
8893        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
8894            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8895        }
8896    }
8897    return skip_call;
8898}
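
/* Illustration (not compiled into the layer): the dependency rules enforced
 * above, in brief. Edges may not point backwards, both ends may not be
 * external, and an equal pair becomes a self-dependency instead of a DAG edge.
 *
 *     VkSubpassDependency self = {};
 *     self.srcSubpass = 2;
 *     self.dstSubpass = 2;   // recorded in has_self_dependency, no prev/next edge added
 */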
8899
8900
8901VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8902                                                                    const VkAllocationCallbacks *pAllocator,
8903                                                                    VkShaderModule *pShaderModule) {
8904    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8905    bool skip_call = false;
8906    if (!shader_is_spirv(pCreateInfo)) {
8907        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
8908                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
8909    }
8910
8911    if (skip_call)
8912        return VK_ERROR_VALIDATION_FAILED_EXT;
8913
8914    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8915
8916    if (res == VK_SUCCESS) {
8917        std::lock_guard<std::mutex> lock(global_lock);
8918        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
8919    }
8920    return res;
8921}
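
/* A minimal sketch (not compiled into the layer), assuming shader_is_spirv()
 * gates on the SPIR-V magic number as its name suggests: a create info that
 * passes carries a word stream starting with 0x07230203. spirv_words and
 * spirv_size_bytes are hypothetical stand-ins.
 *
 *     VkShaderModuleCreateInfo smci = {};
 *     smci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
 *     smci.codeSize = spirv_size_bytes;   // in bytes, a multiple of 4
 *     smci.pCode = spirv_words;           // uint32_t stream, spirv_words[0] == 0x07230203
 */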
8922
8923VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8924                                                                  const VkAllocationCallbacks *pAllocator,
8925                                                                  VkRenderPass *pRenderPass) {
8926    bool skip_call = false;
8927    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8928    std::unique_lock<std::mutex> lock(global_lock);
8929    // Create DAG
8930    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8931    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8932    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
8933    // Validate
8934    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
8935    if (skip_call) {
8936        lock.unlock();
8937        return VK_ERROR_VALIDATION_FAILED_EXT;
8938    }
8939    lock.unlock();
8940    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
8941    if (VK_SUCCESS == result) {
8942        lock.lock();
8943        // TODOSC : Merge in tracking of renderpass from shader_checker
8944        // Shadow create info and store in map
8945        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
8946        if (pCreateInfo->pAttachments) {
8947            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
8948            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
8949                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
8950        }
8951        if (pCreateInfo->pSubpasses) {
8952            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
8953            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
8954
8955            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
8956                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
8957                const uint32_t attachmentCount = subpass->inputAttachmentCount +
8958                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
8959                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
8960                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
8961
8962                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
8963                subpass->pInputAttachments = attachments;
8964                attachments += subpass->inputAttachmentCount;
8965
8966                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8967                subpass->pColorAttachments = attachments;
8968                attachments += subpass->colorAttachmentCount;
8969
8970                if (subpass->pResolveAttachments) {
8971                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8972                    subpass->pResolveAttachments = attachments;
8973                    attachments += subpass->colorAttachmentCount;
8974                }
8975
8976                if (subpass->pDepthStencilAttachment) {
8977                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
8978                    subpass->pDepthStencilAttachment = attachments;
8979                    attachments += 1;
8980                }
8981
                // pPreserveAttachments is an array of uint32_t indices, not VkAttachmentReference,
                //  so copy uint32_t-sized elements to avoid reading past the end of the source array
8982                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
8983                subpass->pPreserveAttachments = &attachments->attachment;
8984            }
8985        }
8986        if (pCreateInfo->pDependencies) {
8987            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
8988            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
8989                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
8990        }
8991        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
8992        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
8993        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
8994#if MTMERGESOURCE
8995        // MTMTODO : Merge with code from above to eliminate duplication
8996        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8997            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
8998            MT_PASS_ATTACHMENT_INFO pass_info;
8999            pass_info.load_op = desc.loadOp;
9000            pass_info.store_op = desc.storeOp;
9001            pass_info.attachment = i;
9002            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
9003        }
9004        // TODO: Maybe fill list and then copy instead of locking
9005        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
9006        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
9007            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
9008        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9009            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9010            if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9011                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9012                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9013                                     "Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
9014            }
9015            for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9016                uint32_t attachment = subpass.pPreserveAttachments[j];
9017                if (attachment >= pCreateInfo->attachmentCount) {
9018                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9019                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9020                                         "Preserve attachment %d cannot be greater than the total number of attachments %d.",
9021                                         attachment, pCreateInfo->attachmentCount);
9022                }
9023            }
9024            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9025                uint32_t attachment;
9026                if (subpass.pResolveAttachments) {
9027                    attachment = subpass.pResolveAttachments[j].attachment;
9028                    if (attachment >= pCreateInfo->attachmentCount && attachment != VK_ATTACHMENT_UNUSED) {
9029                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9030                                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9031                                             "Color attachment %d cannot be greater than the total number of attachments %d.",
9032                                             attachment, pCreateInfo->attachmentCount);
9033                        continue;
9034                    }
9035                }
9036                attachment = subpass.pColorAttachments[j].attachment;
9037                if (attachment >= pCreateInfo->attachmentCount) {
9038                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9039                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9040                                         "Color attachment %d cannot be greater than the total number of attachments %d.",
9041                                         attachment, pCreateInfo->attachmentCount);
9042                    continue;
9043                }
9044                if (attachment_first_read.count(attachment))
9045                    continue;
9046                attachment_first_read.insert(std::make_pair(attachment, false));
9047                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9048            }
9049            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9050                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9051                if (attachment >= pCreateInfo->attachmentCount) {
9052                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9053                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9054                                         "Depth stencil attachment %d cannot be greater than the total number of attachments %d.",
9055                                         attachment, pCreateInfo->attachmentCount);
9056                    continue;
9057                }
9058                if (attachment_first_read.count(attachment))
9059                    continue;
9060                attachment_first_read.insert(std::make_pair(attachment, false));
9061                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9062            }
9063            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9064                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9065                if (attachment >= pCreateInfo->attachmentCount) {
9066                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9067                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9068                                         "Input attachment %d cannot be greater than the total number of attachments %d.",
9069                                         attachment, pCreateInfo->attachmentCount);
9070                    continue;
9071                }
9072                if (attachment_first_read.count(attachment))
9073                    continue;
9074                attachment_first_read.insert(std::make_pair(attachment, true));
9075                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9076            }
9077        }
9078#endif
9079        lock.unlock();
9080    }
9081    return result;
9082}
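
/* Layout of the per-subpass shadow block built above (sketch):
 *
 *     [ inputs | colors | resolves (if any) | depth/stencil (if any) | preserve indices ]
 *
 * Every shadow pointer in a subpass aliases this single allocation (the block
 * is sized generously, counting each preserve index as a full reference),
 * which is why deleteRenderPasses() below frees only the first non-null
 * pointer for each subpass.
 */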
9083// Free the renderpass shadow
9084static void deleteRenderPasses(layer_data *my_data) {
9085    if (my_data->renderPassMap.empty())
9086        return;
9087    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9088        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9089        delete[] pRenderPassInfo->pAttachments;
9090        if (pRenderPassInfo->pSubpasses) {
9091            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9092                // Attachments are all allocated in one block, so we only need to
9093                //  find the first non-null pointer and delete through it
9094                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9095                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9096                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9097                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9098                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9099                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9100                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9101                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9102                }
9103            }
9104            delete[] pRenderPassInfo->pSubpasses;
9105        }
9106        delete[] pRenderPassInfo->pDependencies;
9107        delete pRenderPassInfo;
9108        delete (*ii).second;
9109    }
9110    my_data->renderPassMap.clear();
9111}
9112
9113static bool VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9114    bool skip_call = false;
9115    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9116    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9117    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9118    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
9119    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9120        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9121                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9122                                                                 "with a different number of attachments.");
9123    }
9124    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9125        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9126        auto image_data = dev_data->imageViewMap.find(image_view);
9127        assert(image_data != dev_data->imageViewMap.end());
9128        const VkImage &image = image_data->second.image;
9129        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
9130        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9131                                             pRenderPassInfo->pAttachments[i].initialLayout};
9132        // TODO: Do not iterate over every possibility - consolidate where possible
9133        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9134            uint32_t level = subRange.baseMipLevel + j;
9135            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9136                uint32_t layer = subRange.baseArrayLayer + k;
9137                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9138                IMAGE_CMD_BUF_LAYOUT_NODE node;
9139                if (!FindLayout(pCB, image, sub, node)) {
9140                    SetLayout(pCB, image, sub, newNode);
9141                    continue;
9142                }
9143                if (newNode.layout != node.layout) {
9144                    skip_call |=
9145                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9146                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9147                                                                    "where the "
9148                                                                    "initial layout is %s and the layout of the attachment at the "
9149                                                                    "start of the render pass is %s. The layouts must match.",
9150                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9151                }
9152            }
9153        }
9154    }
9155    return skip_call;
9156}
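
/* A minimal sketch (not compiled into the layer) of how an application keeps
 * this check quiet: transition each attachment to the render pass's declared
 * initialLayout before vkCmdBeginRenderPass(). The variables here are
 * hypothetical stand-ins.
 *
 *     VkImageMemoryBarrier imb = {};
 *     imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
 *     imb.oldLayout = current_layout;                   // what the image is in now
 *     imb.newLayout = attachment_desc.initialLayout;    // what the render pass expects
 *     imb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
 *     imb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
 *     imb.image = attachment_image;
 *     imb.subresourceRange = view_subresource_range;
 *     // recorded with vkCmdPipelineBarrier() before the render pass begins
 */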
9157
9158static void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
9159                                     const int subpass_index) {
9160    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9161    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9162    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9163    if (render_pass_data == dev_data->renderPassMap.end()) {
9164        return;
9165    }
9166    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9167    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9168    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9169        return;
9170    }
9171    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9172    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
9173    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9174        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9175        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9176    }
9177    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9178        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9179        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9180    }
9181    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9182        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9183        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9184    }
9185}
9186
9187static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9188    bool skip_call = false;
9189    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9190        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9191                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9192                             cmd_name.c_str());
9193    }
9194    return skip_call;
9195}
9196
9197static void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9198    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9199    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9200    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9201    if (render_pass_data == dev_data->renderPassMap.end()) {
9202        return;
9203    }
9204    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9205    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9206    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9207        return;
9208    }
9209    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9210    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9211        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9212        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9213    }
9214}
9215
9216static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9217    bool skip_call = false;
9218    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9219    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9220        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9221        pRenderPassBegin->renderArea.offset.y < 0 ||
9222        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9223        skip_call |= static_cast<bool>(log_msg(
9224            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9225            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9226            "Cannot execute a render pass with renderArea not within the bound of the "
9227            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9228            "height %d.",
9229            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9230            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9231    }
9232    return skip_call;
9233}
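
/* A minimal sketch (not compiled into the layer) of a renderArea that passes
 * the bounds check above; fb_width and fb_height stand in for the
 * framebuffer's dimensions.
 *
 *     VkRenderPassBeginInfo rpbi = {};
 *     rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
 *     rpbi.renderArea.offset = {0, 0};                 // non-negative offset
 *     rpbi.renderArea.extent = {fb_width, fb_height};  // offset + extent within the framebuffer
 */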
9234
9235VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9236vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9237    bool skipCall = false;
9238    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9239    std::unique_lock<std::mutex> lock(global_lock);
9240    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9241    if (pCB) {
9242        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
9243#if MTMERGESOURCE
9244            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9245            if (pass_data != dev_data->renderPassMap.end()) {
9246                RENDER_PASS_NODE* pRPNode = pass_data->second;
9247                pRPNode->fb = pRenderPassBegin->framebuffer;
9248                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9249                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9250                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9251                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9252                        if (cb_data != dev_data->commandBufferMap.end()) {
9253                            std::function<bool()> function = [=]() {
9254                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9255                                return false;
9256                            };
9257                            cb_data->second->validate_functions.push_back(function);
9258                        }
9259                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
9260                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9261                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9262                            skipCall |=
9263                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9264                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9265                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9266                                        pRPNode->attachments[i].attachment, attachment_layout);
9267                        }
9268                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9269                        if (cb_data != dev_data->commandBufferMap.end()) {
9270                            std::function<bool()> function = [=]() {
9271                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9272                                return false;
9273                            };
9274                            cb_data->second->validate_functions.push_back(function);
9275                        }
9276                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9277                        if (cb_data != dev_data->commandBufferMap.end()) {
9278                            std::function<bool()> function = [=]() {
9279                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9280                            };
9281                            cb_data->second->validate_functions.push_back(function);
9282                        }
9283                    }
9284                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
9285                        if (cb_data != dev_data->commandBufferMap.end()) {
9286                            std::function<bool()> function = [=]() {
9287                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9288                            };
9289                            cb_data->second->validate_functions.push_back(function);
9290                        }
9291                    }
9292                }
9293            }
9294#endif
9295            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9296            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9297            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9298            if (render_pass_data != dev_data->renderPassMap.end()) {
9299                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9300            }
9301            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9302            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9303            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9304            pCB->activeRenderPass = pRenderPassBegin->renderPass;
9305            // This is a shallow copy as that is all that is needed for now
9306            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9307            pCB->activeSubpass = 0;
9308            pCB->activeSubpassContents = contents;
9309            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
9310            // Connect this framebuffer to this cmdBuffer
9311            dev_data->frameBufferMap[pRenderPassBegin->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9312        } else {
9313            skipCall |=
9314                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9315                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9316        }
9317    }
9318    lock.unlock();
9319    if (!skipCall) {
9320        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9321    }
9322}
9323
9324VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9325    bool skipCall = false;
9326    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9327    std::unique_lock<std::mutex> lock(global_lock);
9328    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9329    if (pCB) {
9330        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9331        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9332        pCB->activeSubpass++;
9333        pCB->activeSubpassContents = contents;
9334        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9335        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
9336            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
9337                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
9338        }
9339        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9340    }
9341    lock.unlock();
9342    if (!skipCall)
9343        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9344}
9345
9346VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
9347    bool skipCall = false;
9348    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9349    std::unique_lock<std::mutex> lock(global_lock);
9350#if MTMERGESOURCE
9351    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9352    if (cb_data != dev_data->commandBufferMap.end()) {
9353        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
9354        if (pass_data != dev_data->renderPassMap.end()) {
9355            RENDER_PASS_NODE* pRPNode = pass_data->second;
9356            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9357                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9358                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
9359                    if (cb_data != dev_data->commandBufferMap.end()) {
9360                        std::function<bool()> function = [=]() {
9361                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9362                            return false;
9363                        };
9364                        cb_data->second->validate_functions.push_back(function);
9365                    }
9366                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9367                    if (cb_data != dev_data->commandBufferMap.end()) {
9368                        std::function<bool()> function = [=]() {
9369                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9370                            return false;
9371                        };
9372                        cb_data->second->validate_functions.push_back(function);
9373                    }
9374                }
9375            }
9376        }
9377    }
9378#endif
9379    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9380    if (pCB) {
9381        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9382        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9383        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9384        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
9385        pCB->activeRenderPass = 0;
9386        pCB->activeSubpass = 0;
9387    }
9388    lock.unlock();
9389    if (!skipCall)
9390        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9391}
9392
9393static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9394                                        VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
9395                                        const char *msg) {
9396    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9397                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9398                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9399                   " that is not compatible with the current render pass %" PRIx64 "."
9400                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9401                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9402                   msg);
9403}
9404
9405static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9406                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9407                                            uint32_t secondaryAttach, bool is_multi) {
9408    bool skip_call = false;
9409    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9410    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9411    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9412        primaryAttach = VK_ATTACHMENT_UNUSED;
9413    }
9414    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9415        secondaryAttach = VK_ATTACHMENT_UNUSED;
9416    }
9417    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9418        return skip_call;
9419    }
9420    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9421        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9422                                                 secondaryAttach, "The first is unused while the second is not.");
9423        return skip_call;
9424    }
9425    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9426        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9427                                                 secondaryAttach, "The second is unused while the first is not.");
9428        return skip_call;
9429    }
9430    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9431        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9432        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9433                                                 secondaryAttach, "They have different formats.");
9434    }
9435    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9436        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9437        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9438                                                 secondaryAttach, "They have different samples.");
9439    }
9440    if (is_multi &&
9441        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9442            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9443        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9444                                                 secondaryAttach, "They have different flags.");
9445    }
9446    return skip_call;
9447}
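
/* Illustration (not compiled into the layer): the compatibility test above
 * compares only format, sample count and (for multi-subpass passes) flags, so
 * two passes whose attachments agree on those fields are compatible even if
 * their load/store ops and layouts differ.
 *
 *     VkAttachmentDescription a = {};
 *     a.format = VK_FORMAT_B8G8R8A8_UNORM;     // must match across the two passes
 *     a.samples = VK_SAMPLE_COUNT_1_BIT;       // must match across the two passes
 *     a.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;  // free to differ
 */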
9448
9449static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9450                                         VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass,
9451                                         bool is_multi) {
9452    bool skip_call = false;
9453    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9454    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9455    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
9456    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
9457    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9458    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9459        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9460        if (i < primary_desc.inputAttachmentCount) {
9461            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9462        }
9463        if (i < secondary_desc.inputAttachmentCount) {
9464            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9465        }
9466        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9467                                                     secondaryPass, secondary_input_attach, is_multi);
9468    }
9469    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9470    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9471        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9472        if (i < primary_desc.colorAttachmentCount) {
9473            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9474        }
9475        if (i < secondary_desc.colorAttachmentCount) {
9476            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9477        }
9478        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9479                                                     secondaryPass, secondary_color_attach, is_multi);
9480        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9481        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9482            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9483        }
9484        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9485            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9486        }
9487        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9488                                                     secondaryPass, secondary_resolve_attach, is_multi);
9489    }
9490    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9491    if (primary_desc.pDepthStencilAttachment) {
9492        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9493    }
9494    if (secondary_desc.pDepthStencilAttachment) {
9495        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9496    }
9497    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9498                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9499    return skip_call;
9500}
9501
9502static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9503                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9504    bool skip_call = false;
9505    // Early exit if renderPass objects are identical (and therefore compatible)
9506    if (primaryPass == secondaryPass)
9507        return skip_call;
9508    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9509    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9510    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
9511        skip_call |=
9512            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9513                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9514                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9515                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9516        return skip_call;
9517    }
9518    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
9519        skip_call |=
9520            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9521                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9522                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9523                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9524        return skip_call;
9525    }
9526    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
9527        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9528                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9529                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9530                             " that is not compatible with the current render pass %" PRIx64 "."
9531                             "They have a different number of subpasses.",
9532                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9533        return skip_call;
9534    }
9535    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
9536    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
9537        skip_call |=
9538            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
9539    }
9540    return skip_call;
9541}
9542
9543static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9544                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9545    bool skip_call = false;
9546    if (!pSubCB->beginInfo.pInheritanceInfo) {
9547        return skip_call;
9548    }
9549    VkFramebuffer primary_fb = dev_data->renderPassMap[pCB->activeRenderPass]->fb;
9550    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9551    if (secondary_fb != VK_NULL_HANDLE) {
9552        if (primary_fb != secondary_fb) {
9553            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9554                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9555                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
9556                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
9557                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9558        }
9559        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
9560        if (fb_data == dev_data->frameBufferMap.end()) {
9561            skip_call |=
9562                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9563                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9564                                                                          "which has invalid framebuffer %" PRIx64 ".",
9565                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9566            return skip_call;
9567        }
9568        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
9569                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9570    }
9571    return skip_call;
9572}
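
/* Illustration (not compiled into the layer): a secondary command buffer can
 * either leave the inherited framebuffer as VK_NULL_HANDLE (skipping the
 * framebuffer match above) or name the one the primary pass was begun with.
 * Variables are hypothetical stand-ins.
 *
 *     VkCommandBufferInheritanceInfo inh = {};
 *     inh.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
 *     inh.renderPass = primary_render_pass;   // must be compatible with the active pass
 *     inh.framebuffer = primary_framebuffer;  // or VK_NULL_HANDLE
 */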
9573
9574static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9575    bool skipCall = false;
9576    unordered_set<int> activeTypes;
9577    for (auto queryObject : pCB->activeQueries) {
9578        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9579        if (queryPoolData != dev_data->queryPoolMap.end()) {
9580            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9581                pSubCB->beginInfo.pInheritanceInfo) {
9582                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9583                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9584                    skipCall |= log_msg(
9585                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9586                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9587                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9588                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics is being queried so the command "
9589                        "buffer must have all bits set on the queryPool.",
9590                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9591                }
9592            }
9593            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9594        }
9595    }
9596    for (auto queryObject : pSubCB->startedQueries) {
9597        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9598        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9599            skipCall |=
9600                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9601                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9602                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9603                        "which has invalid active query pool %" PRIx64 "of type %d but a query of that type has been started on "
9604                        "secondary Cmd Buffer %p.",
9605                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9606                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9607        }
9608    }
9609    return skipCall;
9610}
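
/* Illustration (not compiled into the layer): to satisfy the pipeline
 * statistics check above, the statistics a secondary command buffer inherits
 * must be a subset of what the active query pool was created with; the safest
 * choice simply mirrors the pool. pool_create_info is a hypothetical stand-in.
 *
 *     VkCommandBufferInheritanceInfo inh = {};
 *     inh.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
 *     inh.pipelineStatistics = pool_create_info.pipelineStatistics;
 */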
9611
9612VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9613vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9614    bool skipCall = false;
9615    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9616    std::unique_lock<std::mutex> lock(global_lock);
9617    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9618    if (pCB) {
9619        GLOBAL_CB_NODE *pSubCB = NULL;
9620        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9621            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9622            if (!pSubCB) {
9623                skipCall |=
9624                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9625                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9626                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
9627                            (void *)pCommandBuffers[i], i);
9628            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9629                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9630                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9631                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
9632                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9633                                    (void *)pCommandBuffers[i], i);
9634            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9635                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9636                    skipCall |= log_msg(
9637                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9638                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9639                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
9640                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9641                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
9642                } else {
9643                    // Make sure the render pass is compatible with the parent command buffer's pass when the continue bit is set
9644                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
9645                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9646                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9647                }
9648                string errorString = "";
9649                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
9650                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9651                    skipCall |= log_msg(
9652                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9653                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9654                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
9655                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
9656                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9657                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
9658                }
9659                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
9660                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
9661                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9662                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9663                        skipCall |= log_msg(
9664                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9665                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9666                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
9667                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
9668                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9669                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
9670                    }
9671                }
9672            }
9673            // TODO(mlentine): Move more logic into this method
9674            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9675            skipCall |= validateCommandBufferState(dev_data, pSubCB);
9676            // Secondary cmdBuffers are considered pending execution starting w/
9677            // being recorded
9678            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9679                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9680                    skipCall |= log_msg(
9681                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9682                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9683                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9684                        "set!",
9685                        (uint64_t)(pCB->commandBuffer));
9686                }
9687                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9688                    // Warn that a secondary cmd buffer w/o SIMULTANEOUS_USE_BIT forces the primary to be treated as non-simultaneous
9689                    skipCall |= log_msg(
9690                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9691                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9692                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
9693                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9694                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9695                                          "set, even though it does.",
9696                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9697                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9698                }
9699            }
9700            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
9701                skipCall |=
9702                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9703                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
9704                            "vkCmdExecuteCommands(): Secondary Command Buffer "
9705                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
9706                            "flight and inherited queries not "
9707                            "supported on this device.",
9708                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
9709            }
9710            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9711            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9712            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9713        }
9714        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
9715        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9716    }
9717    lock.unlock();
9718    if (!skipCall)
9719        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9720}
9721
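// Host mapping is only well-defined for images whose subresources are all in
// VK_IMAGE_LAYOUT_GENERAL or VK_IMAGE_LAYOUT_PREINITIALIZED; any other layout may be
// opaquely tiled, so ValidateMapImageLayouts flags a map of such memory as an error.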
9722static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
9723    bool skip_call = false;
9724    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9725    auto mem_data = dev_data->memObjMap.find(mem);
9726    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
9727        std::vector<VkImageLayout> layouts;
9728        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
9729            for (auto layout : layouts) {
9730                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
9731                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9732                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
9733                                                                                         "GENERAL or PREINITIALIZED are supported.",
9734                                         string_VkImageLayout(layout));
9735                }
9736            }
9737        }
9738    }
9739    return skip_call;
9740}
9741
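// Typical app-side sequence this entry point validates (illustrative sketch only;
// 'data' and 'uploadSize' are hypothetical):
//     void *ptr;
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr);   // mem must be HOST_VISIBLE
//     memcpy(ptr, data, uploadSize);
//     vkUnmapMemory(device, mem);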
9742VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
9743vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
9744    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9745
9746    bool skip_call = false;
9747    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9748    std::unique_lock<std::mutex> lock(global_lock);
9749#if MTMERGESOURCE
9750    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
9751    if (pMemObj) {
9752        pMemObj->valid = true;
9753        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
9754             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9755            skip_call =
9756                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9757                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
9758                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
9759        }
9760    }
9761    skip_call |= validateMemRange(dev_data, mem, offset, size);
9762    storeMemRanges(dev_data, mem, offset, size);
9763#endif
9764    skip_call |= ValidateMapImageLayouts(device, mem);
9765    lock.unlock();
9766
9767    if (!skip_call) {
9768        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
9769#if MTMERGESOURCE
9770        lock.lock();
9771        initializeAndTrackMemory(dev_data, mem, size, ppData);
9772        lock.unlock();
9773#endif
9774    }
9775    return result;
9776}
9777
9778#if MTMERGESOURCE
9779VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
9780    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9781    bool skipCall = false;
9782
9783    std::unique_lock<std::mutex> lock(global_lock);
9784    skipCall |= deleteMemRanges(my_data, mem);
9785    lock.unlock();
9786    if (!skipCall) {
9787        my_data->device_dispatch_table->UnmapMemory(device, mem);
9788    }
9789}
9790
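// A flush/invalidate range must lie inside the currently mapped range. E.g. with a map
// of offset=256, size=512 the mapped span is [256, 768); flushing offset=128 fails the
// lower-bound check below, and flushing offset=512, size=512 ([512, 1024)) fails the
// upper-bound check. A mapped size of VK_WHOLE_SIZE skips the upper-bound test.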
9791static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
9792                                   const VkMappedMemoryRange *pMemRanges) {
9793    bool skipCall = false;
9794    for (uint32_t i = 0; i < memRangeCount; ++i) {
9795        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
9796        if (mem_element != my_data->memObjMap.end()) {
9797            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
9798                skipCall |= log_msg(
9799                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9800                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
9801                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
9802                    "(" PRINTF_SIZE_T_SPECIFIER ").",
9803                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
9804            }
9805            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
9806                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
9807                 (pMemRanges[i].offset + pMemRanges[i].size))) {
9808                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9809                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9810                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
9811                                                                 ") exceeds the Memory Object's upper-bound "
9812                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
9813                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9814                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
9815            }
9816        }
9817    }
9818    return skipCall;
9819}
9820
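// Guard-byte scheme for noncoherent mappings (assuming initializeAndTrackMemory, defined
// elsewhere, shadows a mapping of 'size' bytes with a 2*size allocation filled with
// NoncoherentMemoryFillValue): user data lives at [size/2, size/2 + size), leaving guard
// regions [0, size/2) and [size + size/2, 2*size). Any overwritten fill byte in a guard
// region means the app wrote outside its mapped range; the clean user bytes are then
// copied down to the driver's real mapping.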
9821static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
9822                                                     const VkMappedMemoryRange *pMemRanges) {
9823    bool skipCall = false;
9824    for (uint32_t i = 0; i < memRangeCount; ++i) {
9825        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
9826        if (mem_element != my_data->memObjMap.end()) {
9827            if (mem_element->second.pData) {
9828                VkDeviceSize size = mem_element->second.memRange.size;
9829                VkDeviceSize half_size = (size / 2);
9830                char *data = static_cast<char *>(mem_element->second.pData);
9831                for (VkDeviceSize j = 0; j < half_size; ++j) {
9832                    if (data[j] != NoncoherentMemoryFillValue) {
9833                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9834                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9835                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
9836                                            (uint64_t)pMemRanges[i].memory);
9837                    }
9838                }
9839                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
9840                    if (data[j] != NoncoherentMemoryFillValue) {
9841                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9842                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9843                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
9844                                            (uint64_t)pMemRanges[i].memory);
9845                    }
9846                }
9847                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
9848            }
9849        }
9850    }
9851    return skipCall;
9852}
9853
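// For memory types without HOST_COHERENT, writes through a mapping must be flushed
// before the device can see them. Illustrative app-side sketch (field values assumed
// typical):
//     VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr,
//                                  mem, /*offset*/ 0, VK_WHOLE_SIZE};
//     vkFlushMappedMemoryRanges(device, 1, &range);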
9854VK_LAYER_EXPORT VkResult VKAPI_CALL
9855vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9856    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9857    bool skipCall = false;
9858    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9859
9860    std::unique_lock<std::mutex> lock(global_lock);
9861    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
9862    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
9863    lock.unlock();
9864    if (!skipCall) {
9865        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9866    }
9867    return result;
9868}
9869
9870VK_LAYER_EXPORT VkResult VKAPI_CALL
9871vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9872    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9873    bool skipCall = false;
9874    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9875
9876    std::unique_lock<std::mutex> lock(global_lock);
9877    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
9878    lock.unlock();
9879    if (!skipCall) {
9880        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9881    }
9882    return result;
9883}
9884#endif
9885
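// Binding validation: record the image->memory association, then re-query the image's
// VkMemoryRequirements (dropping the lock around the down-chain call) so the layer can
// check that this binding does not alias a buffer range already bound to 'mem'.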
9886VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9887    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9888    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9889    bool skipCall = false;
9890    std::unique_lock<std::mutex> lock(global_lock);
9891    auto image_node = dev_data->imageMap.find(image);
9892    if (image_node != dev_data->imageMap.end()) {
9893        // Track objects tied to memory
9894        uint64_t image_handle = reinterpret_cast<uint64_t&>(image);
9895        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
9896        VkMemoryRequirements memRequirements;
9897        lock.unlock();
9898        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
9899        lock.lock();
9900        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
9901                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
9902                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
9903        print_mem_list(dev_data);
9904        lock.unlock();
9905        if (!skipCall) {
9906            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
9907            lock.lock();
9908            dev_data->memObjMap[mem].image = image;
9909            image_node->second.mem = mem;
9910            image_node->second.memOffset = memoryOffset;
9911            image_node->second.memSize = memRequirements.size;
9912            lock.unlock();
9913        }
9914    } else {
9915        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9916                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
9917                "vkBindImageMemory: Cannot find image %" PRIx64 ", has it already been deleted?",
9918                reinterpret_cast<const uint64_t &>(image));
9919    }
9920    return result;
9921}
9922
9923VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
9924    bool skip_call = false;
9925    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9926    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9927    std::unique_lock<std::mutex> lock(global_lock);
9928    auto event_node = dev_data->eventMap.find(event);
9929    if (event_node != dev_data->eventMap.end()) {
9930        event_node->second.needsSignaled = false;
9931        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9932        if (event_node->second.in_use.load()) {
9933            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9934                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9935                                 "Cannot call vkSetEvent() on event %" PRIxLEAST64 " that is already in use by a command buffer.",
9936                                 reinterpret_cast<const uint64_t &>(event));
9937        }
9938    }
9939    lock.unlock();
9940    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9941    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9942    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9943    for (auto queue_data : dev_data->queueMap) {
9944        auto event_entry = queue_data.second.eventToStageMap.find(event);
9945        if (event_entry != queue_data.second.eventToStageMap.end()) {
9946            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9947        }
9948    }
9949    if (!skip_call)
9950        result = dev_data->device_dispatch_table->SetEvent(device, event);
9951    return result;
9952}
9953
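// Sparse binding validation mirrors vkQueueSubmit: the fence must be unsignaled and not
// already in flight, every buffer/opaque-image/image bind is recorded as a memory
// binding, wait semaphores must have a pending signal to consume, and signal semaphores
// must not already be signaled.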
9954VKAPI_ATTR VkResult VKAPI_CALL
9955vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
9956    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9957    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9958    bool skip_call = false;
9959    std::unique_lock<std::mutex> lock(global_lock);
9960    // First verify that fence is not in use
9961    if (fence != VK_NULL_HANDLE) {
9962        dev_data->fenceMap[fence].queue = queue;
9963        if ((bindInfoCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
9964            skip_call |=
9965                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
9966                        reinterpret_cast<uint64_t &>(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
9967                        "Fence %#" PRIx64 " is already in use by another submission.", reinterpret_cast<uint64_t &>(fence));
9968        }
9969        if (!dev_data->fenceMap[fence].needsSignaled) {
9970            skip_call |=
9971                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
9972                        reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
9973                        "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
9974                        reinterpret_cast<uint64_t &>(fence));
9975        }
9976    }
9977    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9978        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9979        // Track objects tied to memory
9980        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
9981            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
9982                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
9983                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9984                                           "vkQueueBindSparse"))
9985                    skip_call = true;
9986            }
9987        }
9988        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
9989            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
9990                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
9991                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9992                                           "vkQueueBindSparse"))
9993                    skip_call = true;
9994            }
9995        }
9996        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
9997            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
9998                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
9999                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10000                                           "vkQueueBindSparse"))
10001                    skip_call = true;
10002            }
10003        }
10004        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10005            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
10006            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10007                if (dev_data->semaphoreMap[semaphore].signaled) {
10008                    dev_data->semaphoreMap[semaphore].signaled = false;
10009                } else {
10010                    skip_call |=
10011                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10012                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10013                                "vkQueueBindSparse: Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64
10014                                " that has no way to be signaled.",
10015                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10016                }
10017            }
10018        }
10019        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10020            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
10021            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10022                if (dev_data->semaphoreMap[semaphore].signaled) {
10023                    skip_call |=
10024                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10025                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10026                                "vkQueueBindSparse: Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
10027                                ", but that semaphore is already signaled.",
10028                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10029                }
10030                dev_data->semaphoreMap[semaphore].signaled = true;
10031            }
10032        }
10033    }
10034    print_mem_list(dev_data);
10035    lock.unlock();
10036
10037    if (!skip_call)
10038        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10039
10040    return result;
10041}
10042
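// On successful creation the layer seeds its SEMAPHORE_NODE: unsignaled, not yet
// associated with a queue, and with a zero in-flight use count. The signaled flag is
// what the submit/bind/present/acquire paths toggle to detect forward-progress bugs.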
10043VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10044                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10045    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10046    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10047    if (result == VK_SUCCESS) {
10048        std::lock_guard<std::mutex> lock(global_lock);
10049        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10050        sNode->signaled = false;
10051        sNode->queue = VK_NULL_HANDLE;
10052        sNode->in_use.store(0);
10053    }
10054    return result;
10055}
10056
10057VKAPI_ATTR VkResult VKAPI_CALL
10058vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10059    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10060    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10061    if (result == VK_SUCCESS) {
10062        std::lock_guard<std::mutex> lock(global_lock);
10063        dev_data->eventMap[*pEvent].needsSignaled = false;
10064        dev_data->eventMap[*pEvent].in_use.store(0);
10065        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10066    }
10067    return result;
10068}
10069
10070VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10071                                                                    const VkAllocationCallbacks *pAllocator,
10072                                                                    VkSwapchainKHR *pSwapchain) {
10073    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10074    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10075
10076    if (VK_SUCCESS == result) {
10077        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
10078        std::lock_guard<std::mutex> lock(global_lock);
10079        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
10080    }
10081
10082    return result;
10083}
10084
10085VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10086vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10087    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10088    bool skipCall = false;
10089
10090    std::unique_lock<std::mutex> lock(global_lock);
10091    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
10092    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
10093        if (swapchain_data->second->images.size() > 0) {
10094            for (auto swapchain_image : swapchain_data->second->images) {
10095                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10096                if (image_sub != dev_data->imageSubresourceMap.end()) {
10097                    for (auto imgsubpair : image_sub->second) {
10098                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10099                        if (image_item != dev_data->imageLayoutMap.end()) {
10100                            dev_data->imageLayoutMap.erase(image_item);
10101                        }
10102                    }
10103                    dev_data->imageSubresourceMap.erase(image_sub);
10104                }
10105                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
10106                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10107                dev_data->imageMap.erase(swapchain_image);
10108            }
10109        }
10110        delete swapchain_data->second;
10111        dev_data->device_extensions.swapchainMap.erase(swapchain);
10112    }
10113    lock.unlock();
10114    if (!skipCall)
10115        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10116}
10117
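// Swapchain images are created by the WSI implementation rather than vkCreateImage, so
// on the second (pSwapchainImages != NULL) call this wrapper synthesizes the tracking
// state that vkCreateImage would normally populate, using the sentinel
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY in place of a real VkDeviceMemory binding.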
10118VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10119vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10120    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10121    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10122
10123    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10124        // This should never happen and is checked by param checker.
10125        if (!pCount)
10126            return result;
10127        std::lock_guard<std::mutex> lock(global_lock);
10128        const size_t count = *pCount;
10129        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
10130        if (!swapchain_node->images.empty()) {
10131            // TODO : Not sure I like the memcmp here, but it works
10132            const bool mismatch = (swapchain_node->images.size() != count ||
10133                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10134            if (mismatch) {
10135                // TODO: Verify against Valid Usage section of extension
10136                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10137                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10138                        "vkGetSwapchainImagesKHR(%" PRIu64
10139                        ") returned mismatching data",
10140                        (uint64_t)(swapchain));
10141            }
10142        }
10143        for (uint32_t i = 0; i < *pCount; ++i) {
10144            IMAGE_LAYOUT_NODE image_layout_node;
10145            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10146            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10147            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
10148            image_node.createInfo.mipLevels = 1;
10149            image_node.createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10150            image_node.createInfo.usage = swapchain_node->createInfo.imageUsage;
10151            image_node.valid = false;
10152            image_node.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10153            swapchain_node->images.push_back(pSwapchainImages[i]);
10154            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10155            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10156            dev_data->imageLayoutMap[subpair] = image_layout_node;
10157            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10158        }
10159    }
10160    return result;
10161}
10162
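// Present-time checks: each wait semaphore must have a pending signal to consume, and
// every presented image must be in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. An app typically
// gets the image there before submission, e.g. (illustrative sketch) by recording a
// VkImageMemoryBarrier w/ oldLayout = COLOR_ATTACHMENT_OPTIMAL, newLayout =
// PRESENT_SRC_KHR via vkCmdPipelineBarrier in its last command buffer.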
10163VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10164    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10165    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10166    bool skip_call = false;
10167
10168    if (pPresentInfo) {
10169        std::lock_guard<std::mutex> lock(global_lock);
10170        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10171            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
10172            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10173                if (dev_data->semaphoreMap[semaphore].signaled) {
10174                    dev_data->semaphoreMap[semaphore].signaled = false;
10175                } else {
10176                    skip_call |=
10177                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10178                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10179                                "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10180                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10181                }
10182            }
10183        }
10184        VkDeviceMemory mem;
10185        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10186            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
10187            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
10188                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
10189                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
10190#if MTMERGESOURCE
10191                skip_call |=
10192                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10193                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10194#endif
10195                vector<VkImageLayout> layouts;
10196                if (FindLayouts(dev_data, image, layouts)) {
10197                    for (auto layout : layouts) {
10198                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10199                            skip_call |=
10200                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10201                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10202                                        "Images passed to present must be in layout "
10203                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but the image is in %s",
10204                                        string_VkImageLayout(layout));
10205                        }
10206                    }
10207                }
10208            }
10209        }
10210    }
10211
10212    if (!skip_call)
10213        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10214
10215    return result;
10216}
10217
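// Acquire-time checks: a semaphore passed here will be signaled by the presentation
// engine, so it must not already be signaled; the layer marks it signaled on success.
// Any fence passed in is associated with the swapchain so its eventual wait can retire
// the acquire.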
10218VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10219                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10220    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10221    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10222    bool skipCall = false;
10223
10224    std::unique_lock<std::mutex> lock(global_lock);
10225    if (semaphore != VK_NULL_HANDLE &&
10226        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10227        if (dev_data->semaphoreMap[semaphore].signaled) {
10228            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10229                               reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10230                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10231        }
10232        dev_data->semaphoreMap[semaphore].signaled = true;
10233    }
10234    auto fence_data = dev_data->fenceMap.find(fence);
10235    if (fence_data != dev_data->fenceMap.end()) {
10236        fence_data->second.swapchain = swapchain;
10237    }
10238    lock.unlock();
10239
10240    if (!skipCall) {
10241        result =
10242            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10243    }
10244
10245    return result;
10246}
10247
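// Applications receive this layer's messages by registering a callback through
// VK_EXT_debug_report. Illustrative registration (sketch; 'MyDebugCallback' is a
// hypothetical PFN_vkDebugReportCallbackEXT):
//     VkDebugReportCallbackCreateInfoEXT ci = {VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, nullptr,
//                                              VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT,
//                                              MyDebugCallback, nullptr};
//     vkCreateDebugReportCallbackEXT(instance, &ci, nullptr, &callback);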
10248VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10249vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10250                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10251    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10252    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10253    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10254    if (VK_SUCCESS == res) {
10255        std::lock_guard<std::mutex> lock(global_lock);
10256        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
10257    }
10258    return res;
10259}
10260
10261VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
10262                                                                           VkDebugReportCallbackEXT msgCallback,
10263                                                                           const VkAllocationCallbacks *pAllocator) {
10264    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10265    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10266    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10267    std::lock_guard<std::mutex> lock(global_lock);
10268    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10269}
10270
10271VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10272vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10273                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10274    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10275    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10276                                                            pMsg);
10277}
10278
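// The loader resolves device-level entry points through this function: names this layer
// intercepts return the wrappers defined above, WSI entry points are exposed only when
// the swapchain extension was enabled at device creation, and anything else falls
// through to the next layer's GetDeviceProcAddr.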
10279VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10280    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10281        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10282    if (!strcmp(funcName, "vkDestroyDevice"))
10283        return (PFN_vkVoidFunction)vkDestroyDevice;
10284    if (!strcmp(funcName, "vkQueueSubmit"))
10285        return (PFN_vkVoidFunction)vkQueueSubmit;
10286    if (!strcmp(funcName, "vkWaitForFences"))
10287        return (PFN_vkVoidFunction)vkWaitForFences;
10288    if (!strcmp(funcName, "vkGetFenceStatus"))
10289        return (PFN_vkVoidFunction)vkGetFenceStatus;
10290    if (!strcmp(funcName, "vkQueueWaitIdle"))
10291        return (PFN_vkVoidFunction)vkQueueWaitIdle;
10292    if (!strcmp(funcName, "vkDeviceWaitIdle"))
10293        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
10294    if (!strcmp(funcName, "vkGetDeviceQueue"))
10295        return (PFN_vkVoidFunction)vkGetDeviceQueue;
10296    if (!strcmp(funcName, "vkDestroyInstance"))
10297        return (PFN_vkVoidFunction)vkDestroyInstance;
10300    if (!strcmp(funcName, "vkDestroyFence"))
10301        return (PFN_vkVoidFunction)vkDestroyFence;
10302    if (!strcmp(funcName, "vkResetFences"))
10303        return (PFN_vkVoidFunction)vkResetFences;
10304    if (!strcmp(funcName, "vkDestroySemaphore"))
10305        return (PFN_vkVoidFunction)vkDestroySemaphore;
10306    if (!strcmp(funcName, "vkDestroyEvent"))
10307        return (PFN_vkVoidFunction)vkDestroyEvent;
10308    if (!strcmp(funcName, "vkDestroyQueryPool"))
10309        return (PFN_vkVoidFunction)vkDestroyQueryPool;
10310    if (!strcmp(funcName, "vkDestroyBuffer"))
10311        return (PFN_vkVoidFunction)vkDestroyBuffer;
10312    if (!strcmp(funcName, "vkDestroyBufferView"))
10313        return (PFN_vkVoidFunction)vkDestroyBufferView;
10314    if (!strcmp(funcName, "vkDestroyImage"))
10315        return (PFN_vkVoidFunction)vkDestroyImage;
10316    if (!strcmp(funcName, "vkDestroyImageView"))
10317        return (PFN_vkVoidFunction)vkDestroyImageView;
10318    if (!strcmp(funcName, "vkDestroyShaderModule"))
10319        return (PFN_vkVoidFunction)vkDestroyShaderModule;
10320    if (!strcmp(funcName, "vkDestroyPipeline"))
10321        return (PFN_vkVoidFunction)vkDestroyPipeline;
10322    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
10323        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
10324    if (!strcmp(funcName, "vkDestroySampler"))
10325        return (PFN_vkVoidFunction)vkDestroySampler;
10326    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
10327        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
10328    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
10329        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
10330    if (!strcmp(funcName, "vkDestroyFramebuffer"))
10331        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
10332    if (!strcmp(funcName, "vkDestroyRenderPass"))
10333        return (PFN_vkVoidFunction)vkDestroyRenderPass;
10334    if (!strcmp(funcName, "vkCreateBuffer"))
10335        return (PFN_vkVoidFunction)vkCreateBuffer;
10336    if (!strcmp(funcName, "vkCreateBufferView"))
10337        return (PFN_vkVoidFunction)vkCreateBufferView;
10338    if (!strcmp(funcName, "vkCreateImage"))
10339        return (PFN_vkVoidFunction)vkCreateImage;
10340    if (!strcmp(funcName, "vkCreateImageView"))
10341        return (PFN_vkVoidFunction)vkCreateImageView;
10342    if (!strcmp(funcName, "vkCreateFence"))
10343        return (PFN_vkVoidFunction)vkCreateFence;
10344    if (!strcmp(funcName, "vkCreatePipelineCache"))
10345        return (PFN_vkVoidFunction)vkCreatePipelineCache;
10346    if (!strcmp(funcName, "vkDestroyPipelineCache"))
10347        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
10348    if (!strcmp(funcName, "vkGetPipelineCacheData"))
10349        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
10350    if (!strcmp(funcName, "vkMergePipelineCaches"))
10351        return (PFN_vkVoidFunction)vkMergePipelineCaches;
10352    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
10353        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
10354    if (!strcmp(funcName, "vkCreateComputePipelines"))
10355        return (PFN_vkVoidFunction)vkCreateComputePipelines;
10356    if (!strcmp(funcName, "vkCreateSampler"))
10357        return (PFN_vkVoidFunction)vkCreateSampler;
10358    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
10359        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
10360    if (!strcmp(funcName, "vkCreatePipelineLayout"))
10361        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
10362    if (!strcmp(funcName, "vkCreateDescriptorPool"))
10363        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
10364    if (!strcmp(funcName, "vkResetDescriptorPool"))
10365        return (PFN_vkVoidFunction)vkResetDescriptorPool;
10366    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
10367        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
10368    if (!strcmp(funcName, "vkFreeDescriptorSets"))
10369        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
10370    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
10371        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
10372    if (!strcmp(funcName, "vkCreateCommandPool"))
10373        return (PFN_vkVoidFunction)vkCreateCommandPool;
10374    if (!strcmp(funcName, "vkDestroyCommandPool"))
10375        return (PFN_vkVoidFunction)vkDestroyCommandPool;
10376    if (!strcmp(funcName, "vkResetCommandPool"))
10377        return (PFN_vkVoidFunction)vkResetCommandPool;
10378    if (!strcmp(funcName, "vkCreateQueryPool"))
10379        return (PFN_vkVoidFunction)vkCreateQueryPool;
10380    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
10381        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
10382    if (!strcmp(funcName, "vkFreeCommandBuffers"))
10383        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
10384    if (!strcmp(funcName, "vkBeginCommandBuffer"))
10385        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
10386    if (!strcmp(funcName, "vkEndCommandBuffer"))
10387        return (PFN_vkVoidFunction)vkEndCommandBuffer;
10388    if (!strcmp(funcName, "vkResetCommandBuffer"))
10389        return (PFN_vkVoidFunction)vkResetCommandBuffer;
10390    if (!strcmp(funcName, "vkCmdBindPipeline"))
10391        return (PFN_vkVoidFunction)vkCmdBindPipeline;
10392    if (!strcmp(funcName, "vkCmdSetViewport"))
10393        return (PFN_vkVoidFunction)vkCmdSetViewport;
10394    if (!strcmp(funcName, "vkCmdSetScissor"))
10395        return (PFN_vkVoidFunction)vkCmdSetScissor;
10396    if (!strcmp(funcName, "vkCmdSetLineWidth"))
10397        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
10398    if (!strcmp(funcName, "vkCmdSetDepthBias"))
10399        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
10400    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
10401        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
10402    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
10403        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
10404    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
10405        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
10406    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
10407        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
10408    if (!strcmp(funcName, "vkCmdSetStencilReference"))
10409        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
10410    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
10411        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
10412    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
10413        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
10414    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
10415        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
10416    if (!strcmp(funcName, "vkCmdDraw"))
10417        return (PFN_vkVoidFunction)vkCmdDraw;
10418    if (!strcmp(funcName, "vkCmdDrawIndexed"))
10419        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
10420    if (!strcmp(funcName, "vkCmdDrawIndirect"))
10421        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
10422    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
10423        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
10424    if (!strcmp(funcName, "vkCmdDispatch"))
10425        return (PFN_vkVoidFunction)vkCmdDispatch;
10426    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
10427        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
10428    if (!strcmp(funcName, "vkCmdCopyBuffer"))
10429        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
10430    if (!strcmp(funcName, "vkCmdCopyImage"))
10431        return (PFN_vkVoidFunction)vkCmdCopyImage;
10432    if (!strcmp(funcName, "vkCmdBlitImage"))
10433        return (PFN_vkVoidFunction)vkCmdBlitImage;
10434    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
10435        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
10436    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
10437        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
10438    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
10439        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
10440    if (!strcmp(funcName, "vkCmdFillBuffer"))
10441        return (PFN_vkVoidFunction)vkCmdFillBuffer;
10442    if (!strcmp(funcName, "vkCmdClearColorImage"))
10443        return (PFN_vkVoidFunction)vkCmdClearColorImage;
10444    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
10445        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
10446    if (!strcmp(funcName, "vkCmdClearAttachments"))
10447        return (PFN_vkVoidFunction)vkCmdClearAttachments;
10448    if (!strcmp(funcName, "vkCmdResolveImage"))
10449        return (PFN_vkVoidFunction)vkCmdResolveImage;
10450    if (!strcmp(funcName, "vkCmdSetEvent"))
10451        return (PFN_vkVoidFunction)vkCmdSetEvent;
10452    if (!strcmp(funcName, "vkCmdResetEvent"))
10453        return (PFN_vkVoidFunction)vkCmdResetEvent;
10454    if (!strcmp(funcName, "vkCmdWaitEvents"))
10455        return (PFN_vkVoidFunction)vkCmdWaitEvents;
10456    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
10457        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
10458    if (!strcmp(funcName, "vkCmdBeginQuery"))
10459        return (PFN_vkVoidFunction)vkCmdBeginQuery;
10460    if (!strcmp(funcName, "vkCmdEndQuery"))
10461        return (PFN_vkVoidFunction)vkCmdEndQuery;
10462    if (!strcmp(funcName, "vkCmdResetQueryPool"))
10463        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
10464    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
10465        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
10466    if (!strcmp(funcName, "vkCmdPushConstants"))
10467        return (PFN_vkVoidFunction)vkCmdPushConstants;
10468    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
10469        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
10470    if (!strcmp(funcName, "vkCreateFramebuffer"))
10471        return (PFN_vkVoidFunction)vkCreateFramebuffer;
10472    if (!strcmp(funcName, "vkCreateShaderModule"))
10473        return (PFN_vkVoidFunction)vkCreateShaderModule;
10474    if (!strcmp(funcName, "vkCreateRenderPass"))
10475        return (PFN_vkVoidFunction)vkCreateRenderPass;
10476    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
10477        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
10478    if (!strcmp(funcName, "vkCmdNextSubpass"))
10479        return (PFN_vkVoidFunction)vkCmdNextSubpass;
10480    if (!strcmp(funcName, "vkCmdEndRenderPass"))
10481        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
10482    if (!strcmp(funcName, "vkCmdExecuteCommands"))
10483        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
10484    if (!strcmp(funcName, "vkSetEvent"))
10485        return (PFN_vkVoidFunction)vkSetEvent;
10486    if (!strcmp(funcName, "vkMapMemory"))
10487        return (PFN_vkVoidFunction)vkMapMemory;
10488#if MTMERGESOURCE
10489    if (!strcmp(funcName, "vkUnmapMemory"))
10490        return (PFN_vkVoidFunction)vkUnmapMemory;
10491    if (!strcmp(funcName, "vkAllocateMemory"))
10492        return (PFN_vkVoidFunction)vkAllocateMemory;
10493    if (!strcmp(funcName, "vkFreeMemory"))
10494        return (PFN_vkVoidFunction)vkFreeMemory;
10495    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
10496        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
10497    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
10498        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
10499    if (!strcmp(funcName, "vkBindBufferMemory"))
10500        return (PFN_vkVoidFunction)vkBindBufferMemory;
10501    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
10502        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
10503    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
10504        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
10505#endif
10506    if (!strcmp(funcName, "vkGetQueryPoolResults"))
10507        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
10508    if (!strcmp(funcName, "vkBindImageMemory"))
10509        return (PFN_vkVoidFunction)vkBindImageMemory;
10510    if (!strcmp(funcName, "vkQueueBindSparse"))
10511        return (PFN_vkVoidFunction)vkQueueBindSparse;
10512    if (!strcmp(funcName, "vkCreateSemaphore"))
10513        return (PFN_vkVoidFunction)vkCreateSemaphore;
10514    if (!strcmp(funcName, "vkCreateEvent"))
10515        return (PFN_vkVoidFunction)vkCreateEvent;
10516
10517    if (dev == NULL)
10518        return NULL;
10519
10520    layer_data *dev_data;
10521    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10522
10523    if (dev_data->device_extensions.wsi_enabled) {
10524        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
10525            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
10526        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
10527            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
10528        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
10529            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
10530        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
10531            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
10532        if (!strcmp(funcName, "vkQueuePresentKHR"))
10533            return (PFN_vkVoidFunction)vkQueuePresentKHR;
10534    }
10535
10536    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
10537    {
10538        if (pTable->GetDeviceProcAddr == NULL)
10539            return NULL;
10540        return pTable->GetDeviceProcAddr(dev, funcName);
10541    }
10542}
10543
10544VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10545    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
10546        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
10547    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10548        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10549    if (!strcmp(funcName, "vkCreateInstance"))
10550        return (PFN_vkVoidFunction)vkCreateInstance;
10551    if (!strcmp(funcName, "vkCreateDevice"))
10552        return (PFN_vkVoidFunction)vkCreateDevice;
10553    if (!strcmp(funcName, "vkDestroyInstance"))
10554        return (PFN_vkVoidFunction)vkDestroyInstance;
10555    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
10556        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
10557    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
10558        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
10559    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
10560        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
10561    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
10562        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
10563
10564    if (instance == NULL)
10565        return NULL;
10566
10567    PFN_vkVoidFunction fptr;
10568
10569    layer_data *my_data;
10570    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10571    fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
10572    if (fptr)
10573        return fptr;
10574
10575    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10576    if (pTable->GetInstanceProcAddr == NULL)
10577        return NULL;
10578    return pTable->GetInstanceProcAddr(instance, funcName);
10579}
10580