core_validation.cpp revision 01a48e41895b3951b297ff4245eff8f9c129ae20
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

using namespace std;

// TODO : CB really needs its own class and files so this is just temp code until that happens
GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        // Make sure that no sets hold onto deleted CB binding
        for (auto set : lastBound[i].uniqueBoundSets) {
            set->RemoveBoundCommandBuffer(this);
        }
    }
}

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {}
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}
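
// Illustrative note (a sketch, not prescriptive): the check above expects the
// application's enabled-layer list to name this layer before unique_objects, e.g.
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
// Reversing the two entries would trigger the LOGCONSOLE message above.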

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc. requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
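
// Usage sketch (assuming a populated shader_module `module`): the two types above
// combine to give a range-based walk plus O(1) jumps to defining instructions:
//     for (auto insn : module) {                      // begin()/end() skip the 5-word SPIR-V header
//         if (insn.opcode() == spv::OpVariable) {
//             auto type = module.get_def(insn.word(1));   // word(1) of OpVariable is its result type id
//         }
//     }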

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return ImageViewCreateInfo ptr for specified imageView or else NULL
VkImageViewCreateInfo *getImageViewData(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return bufferView create info ptr for specified bufferView or else NULL
VkBufferViewCreateInfo *getBufferViewInfo(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        if (img_node)
            return &img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node)
            return &buff_node->mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                       char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = getImageNode(dev_data, image);
    if (image_node) {
        skipCall = validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                        char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        skipCall = validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return nullptr;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = getImageNode(dev_data, image);
        if (image_node && !image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = getImageNode(dev_data, image);
        if (image_node) {
            image_node->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (!pCBNode->memObjs.empty()) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if ((cmdBufRefCount + objRefCount) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, 0x%" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                    "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                           ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                    apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->objBindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns a skip-call bool: true if the calling API should be skipped due to a validation error
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->objBindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skipCall;
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                        const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object 0x%" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto &mem_pair : dev_data->memObjMap) {
        auto mem_info = mem_pair.second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->commandBufferBindings.size() + mem_info->objBindings.size());
        if (0 != mem_info->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->objBindings.size());
        if (!mem_info->objBindings.empty()) {
            for (auto obj : mem_info->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->commandBufferBindings.size());
        if (!mem_info->commandBufferBindings.empty()) {
            for (auto cb : mem_info->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = nullptr;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
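
// Once def_index is built, id lookups are O(1) jumps rather than linear scans; a sketch:
//     auto def = module->get_def(id);   // defining instruction for `id`, or module->end()
//     if (def != module->end() && def.opcode() == spv::OpTypeVector) { /* ... */ }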

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
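
// For orientation, a couple of illustrative outputs from describe_type:
//     "ptr to uniform struct of (vec4 of float32, float32)"
//     "arr[3] of mat4 of vec4 of float32"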


static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}


static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
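
// A sketch of the `relaxed` semantics above: the first type may be a vector with at
// least as many components as the second, and a narrow (<64-bit) scalar on the second
// side may match the first side's vector component type; every other case still
// requires an exact opcode/width match.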

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
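
// Worked examples for the location arithmetic above: a vec4 of float32 needs
// (32 * 4 + 127) / 128 = 1 location; a vec3 of float64 needs (64 * 3 + 127) / 128 = 2;
// a mat4 whose columns are vec4 of float32 needs 4 * 1 = 4 locations.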

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};
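
// The arrayed_input/arrayed_output flags mirror where per-vertex I/O gains an extra
// array level, e.g. a tessellation control shader both reads `in vec4 v[]` and writes
// `out vec4 v[]` (hence true/true), while a geometry shader only reads arrayed inputs.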
1285
1286static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1287    while (true) {
1288
1289        if (def.opcode() == spv::OpTypePointer) {
1290            def = src->get_def(def.word(3));
1291        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1292            def = src->get_def(def.word(2));
1293            is_array_of_verts = false;
1294        } else if (def.opcode() == spv::OpTypeStruct) {
1295            return def;
1296        } else {
1297            return src->end();
1298        }
1299    }
1300}
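
/* For illustration: a tessellation control output block instance such as
 *   out Block { vec4 a; } blk[3];
 * reaches us as OpTypePointer -> OpTypeArray -> OpTypeStruct; with
 * is_array_of_verts set, both wrappers are peeled away to expose the struct. */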
1301
1302static void collect_interface_block_members(shader_module const *src,
1303                                            std::map<location_t, interface_var> &out,
1304                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1305                                            uint32_t id, uint32_t type_id, bool is_patch) {
1306    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1307    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1308    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1309        /* this isn't an interface block. */
1310        return;
1311    }
1312
1313    std::unordered_map<unsigned, unsigned> member_components;
1314
1315    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1316    for (auto insn : *src) {
1317        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1318            unsigned member_index = insn.word(2);
1319
1320            if (insn.word(3) == spv::DecorationComponent) {
1321                unsigned component = insn.word(4);
1322                member_components[member_index] = component;
1323            }
1324        }
1325    }
1326
1327    /* Second pass -- produce the output, from Location decorations */
1328    for (auto insn : *src) {
1329        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1330            unsigned member_index = insn.word(2);
1331            unsigned member_type_id = type.word(2 + member_index);
1332
1333            if (insn.word(3) == spv::DecorationLocation) {
1334                unsigned location = insn.word(4);
1335                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1336                auto component_it = member_components.find(member_index);
1337                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1338
1339                for (unsigned int offset = 0; offset < num_locations; offset++) {
1340                    interface_var v;
1341                    v.id = id;
1342                    /* TODO: member index in interface_var too? */
1343                    v.type_id = member_type_id;
1344                    v.offset = offset;
1345                    v.is_patch = is_patch;
1346                    v.is_block_member = true;
1347                    out[std::make_pair(location + offset, component)] = v;
1348                }
1349            }
1350        }
1351    }
1352}
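
/* For illustration (GLSL with explicit member locations): given
 *   out VertOut { layout(location = 4) vec4 a; layout(location = 5) dvec4 b; } vout;
 * member 'a' yields one interface_var at location 4, while 'b' -- which
 * consumes two locations -- yields entries at both 5 and 6. */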
1353
1354static void collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1355                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1356                                          bool is_array_of_verts) {
1357    std::unordered_map<unsigned, unsigned> var_locations;
1358    std::unordered_map<unsigned, unsigned> var_builtins;
1359    std::unordered_map<unsigned, unsigned> var_components;
1360    std::unordered_map<unsigned, unsigned> blocks;
1361    std::unordered_map<unsigned, unsigned> var_patch;
1362
1363    for (auto insn : *src) {
1364
1365        /* We consider two interface models: SSO (separate shader object) rendezvous-by-location,
1366         * and builtins. Complain about anything that fits neither model.
1367         */
1368        if (insn.opcode() == spv::OpDecorate) {
1369            if (insn.word(2) == spv::DecorationLocation) {
1370                var_locations[insn.word(1)] = insn.word(3);
1371            }
1372
1373            if (insn.word(2) == spv::DecorationBuiltIn) {
1374                var_builtins[insn.word(1)] = insn.word(3);
1375            }
1376
1377            if (insn.word(2) == spv::DecorationComponent) {
1378                var_components[insn.word(1)] = insn.word(3);
1379            }
1380
1381            if (insn.word(2) == spv::DecorationBlock) {
1382                blocks[insn.word(1)] = 1;
1383            }
1384
1385            if (insn.word(2) == spv::DecorationPatch) {
1386                var_patch[insn.word(1)] = 1;
1387            }
1388        }
1389    }
1390
1391    /* TODO: handle grouped decorations */
1392    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1393     * have the same location, and we DON'T want to clobber. */
1394
1395    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1396       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1397       the word to determine which word contains the terminator. */
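    /* For reference, an OpEntryPoint instruction is laid out as:
     *   word(1): execution model, word(2): the entry point's function <id>,
     *   word(3..): the literal name, null-padded to a word boundary,
     *   remaining words: <id>s of the global variables forming the interface. */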
1398    uint32_t word = 3;
1399    while (entrypoint.word(word) & 0xff000000u) {
1400        ++word;
1401    }
1402    ++word;
1403
1404    for (; word < entrypoint.len(); word++) {
1405        auto insn = src->get_def(entrypoint.word(word));
1406        assert(insn != src->end());
1407        assert(insn.opcode() == spv::OpVariable);
1408
1409        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1410            unsigned id = insn.word(2);
1411            unsigned type = insn.word(1);
1412
1413            int location = value_or_default(var_locations, id, -1);
1414            int builtin = value_or_default(var_builtins, id, -1);
1415            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK; defaults to 0 */
1416            bool is_patch = var_patch.find(id) != var_patch.end();
1417
1418            /* All variables and interface block members in the Input or Output storage classes
1419             * must be decorated with either a builtin or an explicit location.
1420             *
1421             * TODO: integrate the interface block support here. For now, don't complain --
1422             * a valid SPIRV module will only hit this path for the interface block case, as the
1423             * individual members of the type are decorated, rather than variable declarations.
1424             */
1425
1426            if (location != -1) {
1427                /* A user-defined interface variable, with a location. Where a variable
1428                 * occupies multiple locations, emit one result for each. */
1429                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1430                for (unsigned int offset = 0; offset < num_locations; offset++) {
1431                    interface_var v;
1432                    v.id = id;
1433                    v.type_id = type;
1434                    v.offset = offset;
1435                    v.is_patch = is_patch;
1436                    v.is_block_member = false;
1437                    out[std::make_pair(location + offset, component)] = v;
1438                }
1439            } else if (builtin == -1) {
1440                /* An interface block instance */
1441                collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch);
1442            }
1443        }
1444    }
1445}
1446
1447static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src,
1448                                                 std::unordered_set<uint32_t> const &accessible_ids,
1449                                                 std::map<descriptor_slot_t, interface_var> &out) {
1450
1451    std::unordered_map<unsigned, unsigned> var_sets;
1452    std::unordered_map<unsigned, unsigned> var_bindings;
1453
1454    for (auto insn : *src) {
1455        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1456         * DecorationDescriptorSet and DecorationBinding.
1457         */
1458        if (insn.opcode() == spv::OpDecorate) {
1459            if (insn.word(2) == spv::DecorationDescriptorSet) {
1460                var_sets[insn.word(1)] = insn.word(3);
1461            }
1462
1463            if (insn.word(2) == spv::DecorationBinding) {
1464                var_bindings[insn.word(1)] = insn.word(3);
1465            }
1466        }
1467    }
1468
1469    for (auto id : accessible_ids) {
1470        auto insn = src->get_def(id);
1471        assert(insn != src->end());
1472
1473        if (insn.opcode() == spv::OpVariable &&
1474            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1475            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1476            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1477
1478            auto existing_it = out.find(std::make_pair(set, binding));
1479            if (existing_it != out.end()) {
1480                /* conflict within the same SPIR-V module */
1481                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1482                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1483                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1484                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1485                        existing_it->first.second);
1486            }
1487
1488            interface_var v;
1489            v.id = insn.word(2);
1490            v.type_id = insn.word(1);
1491            v.offset = 0;
1492            v.is_patch = false;
1493            v.is_block_member = false;
1494            out[std::make_pair(set, binding)] = v;
1495        }
1496    }
1497}
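
/* For illustration: a GLSL declaration such as
 *   layout(set = 1, binding = 3) uniform sampler2D tex;
 * decorates the variable with DescriptorSet 1 and Binding 3, so it lands in
 * 'out' under the descriptor_slot_t key (1, 3). */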
1498
1499static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1500                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1501                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1502                                              shader_stage_attributes const *consumer_stage) {
1503    std::map<location_t, interface_var> outputs;
1504    std::map<location_t, interface_var> inputs;
1505
1506    bool pass = true;
1507
1508    collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1509    collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1510
1511    auto a_it = outputs.begin();
1512    auto b_it = inputs.begin();
1513
1514    /* maps sorted by key (location); walk them together to find mismatches */
1515    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1516        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1517        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1518        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1519        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1520
1521        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1522            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1523                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1524                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1525                        a_first.second, consumer_stage->name)) {
1526                pass = false;
1527            }
1528            a_it++;
1529        } else if (a_at_end || a_first > b_first) {
1530            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1531                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1532                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1533                        producer_stage->name)) {
1534                pass = false;
1535            }
1536            b_it++;
1537        } else {
1538            // subtleties of arrayed interfaces:
1539            // - if is_patch, then the member is not arrayed, even though the interface may be.
1540            // - if is_block_member, then the extra array level of an arrayed interface is not
1541            //   expressed in the member type -- it's expressed in the block type.
1542            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1543                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1544                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1545                             true)) {
1546                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1547                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1548                            a_first.first, a_first.second,
1549                            describe_type(producer, a_it->second.type_id).c_str(),
1550                            describe_type(consumer, b_it->second.type_id).c_str())) {
1551                    pass = false;
1552                }
1553            }
1554            if (a_it->second.is_patch != b_it->second.is_patch) {
1555                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1556                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1557                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1558                            "per-%s in %s stage", a_first.first, a_first.second,
1559                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1560                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1561                    pass = false;
1562                }
1563            }
1564            a_it++;
1565            b_it++;
1566        }
1567    }
1568
1569    return pass;
1570}
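
/* Worked example of the merge-walk above: if the producer writes locations
 * {0, 1} and the consumer reads {0, 2}, then location 1 draws a NOT_CONSUMED
 * performance warning, location 2 an INPUT_NOT_PRODUCED error, and location 0
 * proceeds to the type and patch-decoration checks. */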
1571
1572enum FORMAT_TYPE {
1573    FORMAT_TYPE_UNDEFINED,
1574    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1575    FORMAT_TYPE_SINT,
1576    FORMAT_TYPE_UINT,
1577};
1578
1579static unsigned get_format_type(VkFormat fmt) {
1580    switch (fmt) {
1581    case VK_FORMAT_UNDEFINED:
1582        return FORMAT_TYPE_UNDEFINED;
1583    case VK_FORMAT_R8_SINT:
1584    case VK_FORMAT_R8G8_SINT:
1585    case VK_FORMAT_R8G8B8_SINT:
1586    case VK_FORMAT_R8G8B8A8_SINT:
1587    case VK_FORMAT_R16_SINT:
1588    case VK_FORMAT_R16G16_SINT:
1589    case VK_FORMAT_R16G16B16_SINT:
1590    case VK_FORMAT_R16G16B16A16_SINT:
1591    case VK_FORMAT_R32_SINT:
1592    case VK_FORMAT_R32G32_SINT:
1593    case VK_FORMAT_R32G32B32_SINT:
1594    case VK_FORMAT_R32G32B32A32_SINT:
1595    case VK_FORMAT_R64_SINT:
1596    case VK_FORMAT_R64G64_SINT:
1597    case VK_FORMAT_R64G64B64_SINT:
1598    case VK_FORMAT_R64G64B64A64_SINT:
1599    case VK_FORMAT_B8G8R8_SINT:
1600    case VK_FORMAT_B8G8R8A8_SINT:
1601    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1602    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1603    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1604        return FORMAT_TYPE_SINT;
1605    case VK_FORMAT_R8_UINT:
1606    case VK_FORMAT_R8G8_UINT:
1607    case VK_FORMAT_R8G8B8_UINT:
1608    case VK_FORMAT_R8G8B8A8_UINT:
1609    case VK_FORMAT_R16_UINT:
1610    case VK_FORMAT_R16G16_UINT:
1611    case VK_FORMAT_R16G16B16_UINT:
1612    case VK_FORMAT_R16G16B16A16_UINT:
1613    case VK_FORMAT_R32_UINT:
1614    case VK_FORMAT_R32G32_UINT:
1615    case VK_FORMAT_R32G32B32_UINT:
1616    case VK_FORMAT_R32G32B32A32_UINT:
1617    case VK_FORMAT_R64_UINT:
1618    case VK_FORMAT_R64G64_UINT:
1619    case VK_FORMAT_R64G64B64_UINT:
1620    case VK_FORMAT_R64G64B64A64_UINT:
1621    case VK_FORMAT_B8G8R8_UINT:
1622    case VK_FORMAT_B8G8R8A8_UINT:
1623    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1624    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1625    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1626        return FORMAT_TYPE_UINT;
1627    default:
1628        return FORMAT_TYPE_FLOAT;
1629    }
1630}
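
/* e.g. VK_FORMAT_R8G8B8A8_UNORM falls through to FORMAT_TYPE_FLOAT: UNORM data
 * is read as floating point in the shader, so it must feed a float interface
 * variable even though the backing bytes are integers. */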
1631
1632/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1633 * for comparison to a VkFormat's characterization above. */
1634static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1635    auto insn = src->get_def(type);
1636    assert(insn != src->end());
1637
1638    switch (insn.opcode()) {
1639    case spv::OpTypeInt:
1640        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1641    case spv::OpTypeFloat:
1642        return FORMAT_TYPE_FLOAT;
1643    case spv::OpTypeVector:
1644        return get_fundamental_type(src, insn.word(2));
1645    case spv::OpTypeMatrix:
1646        return get_fundamental_type(src, insn.word(2));
1647    case spv::OpTypeArray:
1648        return get_fundamental_type(src, insn.word(2));
1649    case spv::OpTypePointer:
1650        return get_fundamental_type(src, insn.word(3));
1651    default:
1652        return FORMAT_TYPE_UNDEFINED;
1653    }
1654}
1655
1656static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1657    uint32_t bit_pos = u_ffs(stage);
1658    return bit_pos - 1;
1659}
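
/* u_ffs returns the 1-based index of the lowest set bit, so the subtraction
 * yields a compact zero-based index -- e.g. VK_SHADER_STAGE_FRAGMENT_BIT
 * (0x10) maps to stage id 4. */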
1660
1661static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1662    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1663     * each binding should be specified only once.
1664     */
1665    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1666    bool pass = true;
1667
1668    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1669        auto desc = &vi->pVertexBindingDescriptions[i];
1670        auto &binding = bindings[desc->binding];
1671        if (binding) {
1672            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1673                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1674                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1675                pass = false;
1676            }
1677        } else {
1678            binding = desc;
1679        }
1680    }
1681
1682    return pass;
1683}
1684
1685static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1686                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1687    std::map<location_t, interface_var> inputs;
1688    bool pass = true;
1689
1690    collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false);
1691
1692    /* Build index by location */
1693    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1694    if (vi) {
1695        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1696            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1697            for (auto j = 0u; j < num_locations; j++) {
1698                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1699            }
1700        }
1701    }
1702
1703    auto it_a = attribs.begin();
1704    auto it_b = inputs.begin();
1705
1706    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1707        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1708        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1709        auto a_first = a_at_end ? 0 : it_a->first;
1710        auto b_first = b_at_end ? 0 : it_b->first.first;
1711        if (!a_at_end && (b_at_end || a_first < b_first)) {
1712            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1713                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1714                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1715                pass = false;
1716            }
1717            it_a++;
1718        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1719            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1720                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1721                        b_first)) {
1722                pass = false;
1723            }
1724            it_b++;
1725        } else {
1726            unsigned attrib_type = get_format_type(it_a->second->format);
1727            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1728
1729            /* type checking */
1730            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1731                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1732                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1733                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1734                            string_VkFormat(it_a->second->format), a_first,
1735                            describe_type(vs, it_b->second.type_id).c_str())) {
1736                    pass = false;
1737                }
1738            }
1739
1740            /* OK! */
1741            it_a++;
1742            it_b++;
1743        }
1744    }
1745
1746    return pass;
1747}
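
/* For illustration: an R32G32_SFLOAT attribute at location 0 checked against a
 * VS input 'layout(location = 0) in ivec2 v;' walks both maps to the shared
 * key 0, then fails the fundamental-type check (FLOAT vs SINT). */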
1748
1749static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1750                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1751    std::map<location_t, interface_var> outputs;
1752    std::map<uint32_t, VkFormat> color_attachments;
1753    for (auto i = 0u; i < rp->subpassColorFormats[subpass].size(); i++) {
1754        if (rp->subpassColorFormats[subpass][i] != VK_FORMAT_UNDEFINED) {
1755            color_attachments[i] = rp->subpassColorFormats[subpass][i];
1756        }
1757    }
1758
1759    bool pass = true;
1760
1761    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */
1762
1763    collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false);
1764
1765    auto it_a = outputs.begin();
1766    auto it_b = color_attachments.begin();
1767
1768    /* Walk attachment list and outputs together */
1769
1770    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1771        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1772        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1773
1774        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1775            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1776                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1777                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1778                pass = false;
1779            }
1780            it_a++;
1781        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1782            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1783                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1784                pass = false;
1785            }
1786            it_b++;
1787        } else {
1788            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1789            unsigned att_type = get_format_type(it_b->second);
1790
1791            /* type checking */
1792            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1793                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1794                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1795                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1796                            string_VkFormat(it_b->second),
1797                            describe_type(fs, it_a->second.type_id).c_str())) {
1798                    pass = false;
1799                }
1800            }
1801
1802            /* OK! */
1803            it_a++;
1804            it_b++;
1805        }
1806    }
1807
1808    return pass;
1809}
1810
1811/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1812 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1813 * for example.
1814 * Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
1815 *  - NOT the shader input/output interfaces.
1816 *
1817 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1818 * converting parts of this to be generated from the machine-readable spec instead.
1819 */
1820static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1821    std::unordered_set<uint32_t> worklist;
1822    worklist.insert(entrypoint.word(2));
1823
1824    while (!worklist.empty()) {
1825        auto id_iter = worklist.begin();
1826        auto id = *id_iter;
1827        worklist.erase(id_iter);
1828
1829        auto insn = src->get_def(id);
1830        if (insn == src->end()) {
1831            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1832             * across all kinds of things here that we may not care about. */
1833            continue;
1834        }
1835
1836        /* try to add to the output set */
1837        if (!ids.insert(id).second) {
1838            continue; /* if we already saw this id, we don't want to walk it again. */
1839        }
1840
1841        switch (insn.opcode()) {
1842        case spv::OpFunction:
1843            /* scan whole body of the function, enlisting anything interesting */
1844            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1845                switch (insn.opcode()) {
1846                case spv::OpLoad:
1847                case spv::OpAtomicLoad:
1848                case spv::OpAtomicExchange:
1849                case spv::OpAtomicCompareExchange:
1850                case spv::OpAtomicCompareExchangeWeak:
1851                case spv::OpAtomicIIncrement:
1852                case spv::OpAtomicIDecrement:
1853                case spv::OpAtomicIAdd:
1854                case spv::OpAtomicISub:
1855                case spv::OpAtomicSMin:
1856                case spv::OpAtomicUMin:
1857                case spv::OpAtomicSMax:
1858                case spv::OpAtomicUMax:
1859                case spv::OpAtomicAnd:
1860                case spv::OpAtomicOr:
1861                case spv::OpAtomicXor:
1862                    worklist.insert(insn.word(3)); /* ptr */
1863                    break;
1864                case spv::OpStore:
1865                case spv::OpAtomicStore:
1866                    worklist.insert(insn.word(1)); /* ptr */
1867                    break;
1868                case spv::OpAccessChain:
1869                case spv::OpInBoundsAccessChain:
1870                    worklist.insert(insn.word(3)); /* base ptr */
1871                    break;
1872                case spv::OpSampledImage:
1873                case spv::OpImageSampleImplicitLod:
1874                case spv::OpImageSampleExplicitLod:
1875                case spv::OpImageSampleDrefImplicitLod:
1876                case spv::OpImageSampleDrefExplicitLod:
1877                case spv::OpImageSampleProjImplicitLod:
1878                case spv::OpImageSampleProjExplicitLod:
1879                case spv::OpImageSampleProjDrefImplicitLod:
1880                case spv::OpImageSampleProjDrefExplicitLod:
1881                case spv::OpImageFetch:
1882                case spv::OpImageGather:
1883                case spv::OpImageDrefGather:
1884                case spv::OpImageRead:
1885                case spv::OpImage:
1886                case spv::OpImageQueryFormat:
1887                case spv::OpImageQueryOrder:
1888                case spv::OpImageQuerySizeLod:
1889                case spv::OpImageQuerySize:
1890                case spv::OpImageQueryLod:
1891                case spv::OpImageQueryLevels:
1892                case spv::OpImageQuerySamples:
1893                case spv::OpImageSparseSampleImplicitLod:
1894                case spv::OpImageSparseSampleExplicitLod:
1895                case spv::OpImageSparseSampleDrefImplicitLod:
1896                case spv::OpImageSparseSampleDrefExplicitLod:
1897                case spv::OpImageSparseSampleProjImplicitLod:
1898                case spv::OpImageSparseSampleProjExplicitLod:
1899                case spv::OpImageSparseSampleProjDrefImplicitLod:
1900                case spv::OpImageSparseSampleProjDrefExplicitLod:
1901                case spv::OpImageSparseFetch:
1902                case spv::OpImageSparseGather:
1903                case spv::OpImageSparseDrefGather:
1904                case spv::OpImageTexelPointer:
1905                    worklist.insert(insn.word(3)); /* image or sampled image */
1906                    break;
1907                case spv::OpImageWrite:
1908                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1909                    break;
1910                case spv::OpFunctionCall:
1911                    for (uint32_t i = 3; i < insn.len(); i++) {
1912                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1913                    }
1914                    break;
1915
1916                case spv::OpExtInst:
1917                    for (uint32_t i = 5; i < insn.len(); i++) {
1918                        worklist.insert(insn.word(i)); /* operands to ext inst */
1919                    }
1920                    break;
1921                }
1922            }
1923            break;
1924        }
1925    }
1926}
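
/* For illustration: starting from the entrypoint's function <id>, an OpLoad in
 * its body enlists its pointer operand, which may resolve to an OpVariable
 * <id>; that variable then lands in 'ids' even though it was never named in
 * the entrypoint's interface list. */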
1927
1928static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1929                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1930                                                          shader_module const *src, spirv_inst_iter type,
1931                                                          VkShaderStageFlagBits stage) {
1932    bool pass = true;
1933
1934    /* strip off ptrs etc */
1935    type = get_struct_type(src, type, false);
1936    assert(type != src->end());
1937
1938    /* validate directly off the offsets. this isn't quite correct for arrays
1939     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1940     * sizes */
1941    for (auto insn : *src) {
1942        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1943
1944            if (insn.word(3) == spv::DecorationOffset) {
1945                unsigned offset = insn.word(4);
1946                auto size = 4; /* bytes; TODO: calculate this based on the type */
1947
1948                bool found_range = false;
1949                for (auto const &range : *pushConstantRanges) {
1950                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1951                        found_range = true;
1952
1953                        if ((range.stageFlags & stage) == 0) {
1954                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1955                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1956                                        "Push constant range covering variable starting at "
1957                                        "offset %u not accessible from stage %s",
1958                                        offset, string_VkShaderStageFlagBits(stage))) {
1959                                pass = false;
1960                            }
1961                        }
1962
1963                        break;
1964                    }
1965                }
1966
1967                if (!found_range) {
1968                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1969                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1970                                "Push constant range covering variable starting at "
1971                                "offset %u not declared in layout",
1972                                offset)) {
1973                        pass = false;
1974                    }
1975                }
1976            }
1977        }
1978    }
1979
1980    return pass;
1981}
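
/* Worked example: a member decorated Offset 16 needs some range with
 * range.offset <= 16 and range.offset + range.size >= 20 (given the assumed
 * 4-byte member size above); if such a range exists but omits the stage bit,
 * the NOT_ACCESSIBLE_FROM_STAGE error fires rather than OUT_OF_RANGE. */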
1982
1983static bool validate_push_constant_usage(debug_report_data *report_data,
1984                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
1985                                         std::unordered_set<uint32_t> const &accessible_ids, VkShaderStageFlagBits stage) {
1986    bool pass = true;
1987
1988    for (auto id : accessible_ids) {
1989        auto def_insn = src->get_def(id);
1990        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1991            pass &= validate_push_constant_block_against_pipeline(report_data, pushConstantRanges, src,
1992                                                                 src->get_def(def_insn.word(1)), stage);
1993        }
1994    }
1995
1996    return pass;
1997}
1998
1999// For given pipelineLayout verify that the set_layout_node at slot.first
2000//  has the requested binding at slot.second and return ptr to that binding
2001static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2002
2003    if (!pipelineLayout)
2004        return nullptr;
2005
2006    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
2007        return nullptr;
2008
2009    return pipelineLayout->setLayouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2010}
2011
2012// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2013
2014static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2015
2016// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2017//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2018//   to that same cmd buffer by separate thread are not changing state from underneath us
2019// Track the last cmd buffer touched by this thread
2020
2021static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2022    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2023        if (pCB->drawCount[i])
2024            return true;
2025    }
2026    return false;
2027}
2028
2029// Check object status for selected flag state
2030static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2031                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2032    if (!(pNode->status & status_mask)) {
2033        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2034                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2035                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2036    }
2037    return false;
2038}
2039
2040// Retrieve pipeline node ptr for given pipeline object
2041static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
2042    auto it = my_data->pipelineMap.find(pipeline);
2043    if (it == my_data->pipelineMap.end()) {
2044        return nullptr;
2045    }
2046    return it->second;
2047}
2048
2049static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2050    auto it = my_data->renderPassMap.find(renderpass);
2051    if (it == my_data->renderPassMap.end()) {
2052        return nullptr;
2053    }
2054    return it->second;
2055}
2056
2057static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) {
2058    auto it = my_data->frameBufferMap.find(framebuffer);
2059    if (it == my_data->frameBufferMap.end()) {
2060        return nullptr;
2061    }
2062    return it->second.get();
2063}
2064
2065cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2066    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2067    if (it == my_data->descriptorSetLayoutMap.end()) {
2068        return nullptr;
2069    }
2070    return it->second;
2071}
2072
2073static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2074    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2075    if (it == my_data->pipelineLayoutMap.end()) {
2076        return nullptr;
2077    }
2078    return &it->second;
2079}
2080
2081// Return true if for a given PSO, the given state enum is dynamic, else return false
2082static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2083    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2084        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2085            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2086                return true;
2087        }
2088    }
2089    return false;
2090}
2091
2092// Validate state stored as flags at time of draw call
2093static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2094    bool result;
2095    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2096                             "Dynamic viewport state not set for this command buffer");
2097    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2098                              "Dynamic scissor state not set for this command buffer");
2099    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2100        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2101         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2102        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2103                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2104    }
2105    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2106        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2107        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2108                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2109    }
2110    if (pPipe->blendConstantsEnabled) {
2111        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2112                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2113    }
2114    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2115        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2116        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2117                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2118    }
2119    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2120        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2121        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2122                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2123        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2124                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2125        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2126                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2127    }
2128    if (indexedDraw) {
2129        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2130                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2131                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2132    }
2133    return result;
2134}
2135
2136// Verify attachment reference compatibility according to spec
2137//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
2138//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2139//   to make sure that format and samples counts match.
2140//  If not, they are not compatible.
2141static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2142                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2143                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2144                                             const VkAttachmentDescription *pSecondaryAttachments) {
2145    // Check potential NULL cases first to avoid nullptr issues later
2146    if (pPrimary == nullptr) {
2147        if (pSecondary == nullptr) {
2148            return true;
2149        }
2150        return false;
2151    } else if (pSecondary == nullptr) {
2152        return false;
2153    }
2154    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2155        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2156            return true;
2157    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2158        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2159            return true;
2160    } else { // Format and sample count must match
2161        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2162            return true;
2163        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2164            return false;
2165        }
2166        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2167             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2168            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2169             pSecondaryAttachments[pSecondary[index].attachment].samples))
2170            return true;
2171    }
2172    // Format and sample counts didn't match
2173    return false;
2174}
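
// For illustration: comparing index 2 when the primary subpass declares only
// two color attachments is compatible iff the secondary's reference at index 2
// is VK_ATTACHMENT_UNUSED; when both arrays have index 2, the referenced
// attachment descriptions must agree in format and sample count.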
2175
2176// For given primary and secondary RenderPass objects, verify that they're compatible
2177static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2178                                            string &errorMsg) {
2179    auto primary_render_pass = getRenderPass(my_data, primaryRP);
2180    auto secondary_render_pass = getRenderPass(my_data, secondaryRP);
2181
2182    if (!primary_render_pass) {
2183        stringstream errorStr;
2184        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2185        errorMsg = errorStr.str();
2186        return false;
2187    }
2188
2189    if (!secondary_render_pass) {
2190        stringstream errorStr;
2191        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2192        errorMsg = errorStr.str();
2193        return false;
2194    }
2195    // Trivial pass case is exact same RP
2196    if (primaryRP == secondaryRP) {
2197        return true;
2198    }
2199    const VkRenderPassCreateInfo *primaryRPCI = primary_render_pass->pCreateInfo;
2200    const VkRenderPassCreateInfo *secondaryRPCI = secondary_render_pass->pCreateInfo;
2201    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2202        stringstream errorStr;
2203        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2204                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2205        errorMsg = errorStr.str();
2206        return false;
2207    }
2208    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2210        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2211        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2212        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2213        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2214        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2215            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2216                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2217                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2218                stringstream errorStr;
2219                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2220                errorMsg = errorStr.str();
2221                return false;
2222            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2223                                                         primaryColorCount, primaryRPCI->pAttachments,
2224                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2225                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2226                stringstream errorStr;
2227                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2228                errorMsg = errorStr.str();
2229                return false;
2230            }
2231        }
2232
2233        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2234                                              1, primaryRPCI->pAttachments,
2235                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2236                                              1, secondaryRPCI->pAttachments)) {
2237            stringstream errorStr;
2238            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2239            errorMsg = errorStr.str();
2240            return false;
2241        }
2242
2243        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2244        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2245        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2246        for (uint32_t i = 0; i < inputMax; ++i) {
2247            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2248                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2249                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2250                stringstream errorStr;
2251                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2252                errorMsg = errorStr.str();
2253                return false;
2254            }
2255        }
2256    }
2257    return true;
2258}
2259
2260// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2261// pipelineLayout[layoutIndex]
2262static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2263                                            const VkPipelineLayout layout, const uint32_t layoutIndex, string &errorMsg) {
2264    auto pipeline_layout = getPipelineLayout(my_data, layout);
2265    if (!pipeline_layout) {
2266        stringstream errorStr;
2267        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2268        errorMsg = errorStr.str();
2269        return false;
2270    }
2271    if (layoutIndex >= pipeline_layout->descriptorSetLayouts.size()) {
2272        stringstream errorStr;
2273        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout->descriptorSetLayouts.size()
2274                 << " setLayouts corresponding to sets 0-" << pipeline_layout->descriptorSetLayouts.size() - 1
2275                 << ", but you're attempting to bind set to index " << layoutIndex;
2276        errorMsg = errorStr.str();
2277        return false;
2278    }
2279    auto layout_node = pipeline_layout->setLayouts[layoutIndex];
2280    return pSet->IsCompatible(layout_node, &errorMsg);
2281}
2282
2283// Validate that data for each specialization entry is fully contained within the buffer.
2284static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2285    bool pass = true;
2286
2287    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2288
2289    if (spec) {
2290        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2291            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2292                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2293                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2294                            "Specialization entry %u (for constant id %u) references memory outside provided "
2295                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2296                            " bytes provided)",
2297                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2298                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2299
2300                    pass = false;
2301                }
2302            }
2303        }
2304    }
2305
2306    return pass;
2307}
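
/* Worked example: with dataSize == 8, a map entry of offset 4 / size 8
 * references bytes 4..11 and trips the check above, since only bytes 0..7
 * were provided. */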
2308
2309static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2310                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2311    auto type = module->get_def(type_id);
2312
2313    descriptor_count = 1;
2314
2315    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2316     * descriptor count for each dimension. */
2317    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2318        if (type.opcode() == spv::OpTypeArray) {
2319            descriptor_count *= get_constant_value(module, type.word(3));
2320            type = module->get_def(type.word(2));
2321        }
2322        else {
2323            type = module->get_def(type.word(3));
2324        }
2325    }
2326
2327    switch (type.opcode()) {
2328    case spv::OpTypeStruct: {
2329        for (auto insn : *module) {
2330            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2331                if (insn.word(2) == spv::DecorationBlock) {
2332                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2333                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2334                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2335                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2336                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2337                }
2338            }
2339        }
2340
2341        /* Invalid */
2342        return false;
2343    }
2344
2345    case spv::OpTypeSampler:
2346        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2347
2348    case spv::OpTypeSampledImage:
2349        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2350            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2351             * doesn't really have a sampler, and a texel buffer descriptor
2352             * doesn't really provide one. Allow this slight mismatch.
2353             */
2354            auto image_type = module->get_def(type.word(2));
2355            auto dim = image_type.word(3);
2356            auto sampled = image_type.word(7);
2357            return dim == spv::DimBuffer && sampled == 1;
2358        }
2359        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2360
2361    case spv::OpTypeImage: {
2362        /* Many descriptor types can back an image type -- which one depends on the
2363         * dimension and whether the image will be used with a sampler. SPIRV for
2364         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
2365         * runtime is unacceptable.
2366         */
2367        auto dim = type.word(3);
2368        auto sampled = type.word(7);
2369
2370        if (dim == spv::DimSubpassData) {
2371            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2372        } else if (dim == spv::DimBuffer) {
2373            if (sampled == 1) {
2374                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2375            } else {
2376                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2377            }
2378        } else if (sampled == 1) {
2379            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2380        } else {
2381            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2382        }
2383    }
2384
2385    /* We shouldn't really see any other junk types -- but if we do, they're
2386     * a mismatch.
2387     */
2388    default:
2389        return false; /* Mismatch */
2390    }
2391}
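
/* For illustration: GLSL 'uniform sampler2D' compiles to an OpTypeSampledImage
 * of an OpTypeImage with Dim2D and sampled == 1, matching
 * VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; an array 'sampler2D tex[4]' also
 * sets descriptor_count to 4 via the array-stripping loop above. */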
2392
2393static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2394    if (!feature) {
2395        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2396                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2397                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
2398                    "enabled on the device",
2399                    feature_name)) {
2400            return false;
2401        }
2402    }
2403
2404    return true;
2405}
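// Note: require_feature() returns false only when the missing-feature error is
// actually reported; if the message is filtered by the debug-report settings,
// capability validation simply continues.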
2406
2407static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2408                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2409    bool pass = true;
2410
2412    for (auto insn : *src) {
2413        if (insn.opcode() == spv::OpCapability) {
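            // Map each declared capability to the VkPhysicalDeviceFeatures member that
            // gates it; capabilities with no corresponding feature bit are core in 1.0.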
2414            switch (insn.word(1)) {
2415            case spv::CapabilityMatrix:
2416            case spv::CapabilityShader:
2417            case spv::CapabilityInputAttachment:
2418            case spv::CapabilitySampled1D:
2419            case spv::CapabilityImage1D:
2420            case spv::CapabilitySampledBuffer:
2421            case spv::CapabilityImageBuffer:
2422            case spv::CapabilityImageQuery:
2423            case spv::CapabilityDerivativeControl:
2424                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2425                break;
2426
2427            case spv::CapabilityGeometry:
2428                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2429                break;
2430
2431            case spv::CapabilityTessellation:
2432                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2433                break;
2434
2435            case spv::CapabilityFloat64:
2436                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2437                break;
2438
2439            case spv::CapabilityInt64:
2440                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2441                break;
2442
2443            case spv::CapabilityTessellationPointSize:
2444            case spv::CapabilityGeometryPointSize:
2445                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2446                                        "shaderTessellationAndGeometryPointSize");
2447                break;
2448
2449            case spv::CapabilityImageGatherExtended:
2450                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2451                break;
2452
2453            case spv::CapabilityStorageImageMultisample:
2454                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2455                break;
2456
2457            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2458                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2459                                        "shaderUniformBufferArrayDynamicIndexing");
2460                break;
2461
2462            case spv::CapabilitySampledImageArrayDynamicIndexing:
2463                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2464                                        "shaderSampledImageArrayDynamicIndexing");
2465                break;
2466
2467            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2468                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2469                                        "shaderStorageBufferArrayDynamicIndexing");
2470                break;
2471
2472            case spv::CapabilityStorageImageArrayDynamicIndexing:
2473                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2474                                        "shaderStorageImageArrayDynamicIndexing");
2475                break;
2476
2477            case spv::CapabilityClipDistance:
2478                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2479                break;
2480
2481            case spv::CapabilityCullDistance:
2482                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2483                break;
2484
2485            case spv::CapabilityImageCubeArray:
2486                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2487                break;
2488
2489            case spv::CapabilitySampleRateShading:
2490                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2491                break;
2492
2493            case spv::CapabilitySparseResidency:
2494                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2495                break;
2496
2497            case spv::CapabilityMinLod:
2498                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2499                break;
2500
2501            case spv::CapabilitySampledCubeArray:
2502                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2503                break;
2504
2505            case spv::CapabilityImageMSArray:
2506                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2507                break;
2508
2509            case spv::CapabilityStorageImageExtendedFormats:
2510                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2511                                        "shaderStorageImageExtendedFormats");
2512                break;
2513
2514            case spv::CapabilityInterpolationFunction:
2515                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2516                break;
2517
2518            case spv::CapabilityStorageImageReadWithoutFormat:
2519                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2520                                        "shaderStorageImageReadWithoutFormat");
2521                break;
2522
2523            case spv::CapabilityStorageImageWriteWithoutFormat:
2524                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2525                                        "shaderStorageImageWriteWithoutFormat");
2526                break;
2527
2528            case spv::CapabilityMultiViewport:
2529                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2530                break;
2531
2532            default:
2533                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2534                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2535                            "Shader declares capability %u, not supported in Vulkan.",
2536                            insn.word(1)))
2537                    pass = false;
2538                break;
2539            }
2540        }
2541    }
2542
2543    return pass;
2544}
2545
2546static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2547                                           VkPipelineShaderStageCreateInfo const *pStage,
2548                                           PIPELINE_NODE *pipeline,
2549                                           shader_module **out_module,
2550                                           spirv_inst_iter *out_entrypoint,
2551                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2552                                           std::unordered_map<VkShaderModule,
2553                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
2554    bool pass = true;
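    // Look up the tracked shader_module for this stage. The handle is assumed to
    // already be in shaderModuleMap (recorded when the module was created); a stale
    // handle here would dereference an invalid iterator.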
2555    auto module_it = shaderModuleMap.find(pStage->module);
2556    auto module = *out_module = module_it->second.get();
2557    pass &= validate_specialization_offsets(report_data, pStage);
2558
2559    /* find the entrypoint */
2560    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2561    if (entrypoint == module->end()) {
2562        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2563                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2564                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2565                    string_VkShaderStageFlagBits(pStage->stage))) {
2566            pass = false;
2567        }
2568    }
2569
2570    /* validate shader capabilities against enabled device features */
2571    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2572
2573    /* mark accessible ids */
2574    std::unordered_set<uint32_t> accessible_ids;
2575    mark_accessible_ids(module, entrypoint, accessible_ids);
2576
2577    /* validate descriptor set layout against what the entrypoint actually uses */
2578    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2579    collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses);
2580
2581    auto pipelineLayout = pipeline->pipelineLayout;
2582
2583    /* validate push constant usage */
2584    pass &= validate_push_constant_usage(report_data, &pipelineLayout->pushConstantRanges,
2585                                        module, accessible_ids, pStage->stage);
2586
2587    /* validate descriptor use */
2588    for (auto use : descriptor_uses) {
2589        // While validating shaders, capture which slots are used by the pipeline
2590        pipeline->active_slots[use.first.first].insert(use.first.second);
2591
2592        /* verify given pipelineLayout has requested setLayout with requested binding */
2593        const auto & binding = get_descriptor_binding(pipelineLayout, use.first);
2594        unsigned required_descriptor_count;
2595
2596        if (!binding) {
2597            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2598                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2599                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2600                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2601                pass = false;
2602            }
2603        } else if (~binding->stageFlags & pStage->stage) {
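            // Nonzero iff the binding's stageFlags do not include this shader stage.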
2604            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2605                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2606                        "Shader uses descriptor slot %u.%u (used "
2607                        "as type `%s`) but descriptor not "
2608                        "accessible from stage %s",
2609                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2610                        string_VkShaderStageFlagBits(pStage->stage))) {
2611                pass = false;
2612            }
2613        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2614                                          /*out*/ required_descriptor_count)) {
2615            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2616                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2617                                                                       "%u.%u (used as type `%s`) but the "
2618                                                                       "bound descriptor is of type %s",
2619                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2620                        string_VkDescriptorType(binding->descriptorType))) {
2621                pass = false;
2622            }
2623        } else if (binding->descriptorCount < required_descriptor_count) {
2624            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2625                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2626                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2627                        required_descriptor_count, use.first.first, use.first.second,
2628                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2629                pass = false;
2630            }
2631        }
2632    }
2633
2634    return pass;
2635}
2636
2638// Validate the shaders used by the given pipeline and store the active_slots
2639//  that are actually used by the pipeline into pPipeline->active_slots
2640static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2641                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2642                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2643    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2644    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2645    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2646
2647    shader_module *shaders[5];
2648    memset(shaders, 0, sizeof(shaders));
2649    spirv_inst_iter entrypoints[5];
2650    memset(entrypoints, 0, sizeof(entrypoints));
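    // Five slots, one per graphics stage. Zeroed iterators mean "no entrypoint";
    // this memset assumes spirv_inst_iter is trivially copyable.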
2651    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2652    bool pass = true;
2653
2654    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2655        auto pStage = &pCreateInfo->pStages[i];
2656        auto stage_id = get_shader_stage_id(pStage->stage);
2657        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2658                                               &shaders[stage_id], &entrypoints[stage_id],
2659                                               enabledFeatures, shaderModuleMap);
2660    }
2661
2662    vi = pCreateInfo->pVertexInputState;
2663
2664    if (vi) {
2665        pass &= validate_vi_consistency(report_data, vi);
2666    }
2667
2668    if (shaders[vertex_stage]) {
2669        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2670    }
2671
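    // Walk the geometry pipeline in stage order: advance to the first stage that is
    // actually present, then check each producer's outputs against the inputs of the
    // next present consumer, finishing at the fragment stage.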
2672    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2673    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2674
2675    while (!shaders[producer] && producer != fragment_stage) {
2676        producer++;
2677        consumer++;
2678    }
2679
2680    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2681        assert(shaders[producer]);
2682        if (shaders[consumer]) {
2683            pass &= validate_interface_between_stages(report_data,
2684                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2685                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2686
2687            producer = consumer;
2688        }
2689    }
2690
2691    if (shaders[fragment_stage] && pPipeline->renderPass) {
2692        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2693                                                        pPipeline->renderPass, pCreateInfo->subpass);
2694    }
2695
2696    return pass;
2697}
2698
2699static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2700                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2701    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2702
2703    shader_module *module;
2704    spirv_inst_iter entrypoint;
2705
2706    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2707                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2708}
2709// Return Set node ptr for specified set or else NULL
2710cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2711    auto set_it = my_data->setMap.find(set);
2712    if (set_it == my_data->setMap.end()) {
2713        return NULL;
2714    }
2715    return set_it->second;
2716}
2717// For the given command buffer, verify and update the state for activeSetBindingsPairs
2718//  This includes:
2719//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2720//     To be valid, the dynamic offset combined with the offset and range from its
2721//     descriptor update must not overflow the size of its buffer being updated
2722//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2723//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2724static bool validate_and_update_drawtime_descriptor_state(
2725    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2726    const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>,
2727                            std::vector<uint32_t> const *>> &activeSetBindingsPairs) {
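    // Each tuple carries (bound descriptor set, bindings the pipeline actually uses,
    // dynamic offsets recorded for that set at bind time).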
2728    bool result = false;
2729    for (const auto &set_bindings_pair : activeSetBindingsPairs) {
2730        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2731        std::string err_str;
2732        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2733                                         &err_str)) {
2734            // Report error here
2735            auto set = set_node->GetSet();
2736            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2737                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2738                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at draw time: %s",
2739                              reinterpret_cast<const uint64_t &>(set), err_str.c_str());
2740        }
2741        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2742    }
2743    return result;
2744}
2745
2746// For given pipeline, return number of MSAA samples, or one if MSAA disabled
2747static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2748    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2749        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2750        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2751    }
2752    return VK_SAMPLE_COUNT_1_BIT;
2753}
2754
2755// Validate draw-time state related to the PSO
2756static bool validatePipelineDrawtimeState(layer_data const *my_data,
2757                                          LAST_BOUND_STATE const &state,
2758                                          const GLOBAL_CB_NODE *pCB,
2759                                          PIPELINE_NODE const *pPipeline) {
2760    bool skip_call = false;
2761
2762    // Verify Vtx binding
2763    if (pPipeline->vertexBindingDescriptions.size() > 0) {
2764        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2765            if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2766                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2767                                  __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2768                                  "The Pipeline State Object (0x%" PRIxLEAST64
2769                                  ") expects vertex binding index " PRINTF_SIZE_T_SPECIFIER
2770                                  " of this Command Buffer to be set via vkCmdBindVertexBuffers.",
2771                                  (uint64_t)state.pipeline, i);
2772            }
2773        }
2774    } else {
2775        if (!pCB->currentDrawData.buffers.empty()) {
2776            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2777                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2778                              "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
2779                              ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2780                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2781        }
2782    }
2783    // If viewport or scissor state is dynamic, verify that the dynamic count matches the PSO count.
2784    // Skip check if rasterization is disabled or there is no viewport.
2785    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2786         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2787        pPipeline->graphicsPipelineCI.pViewportState) {
2788        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2789        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2790        if (dynViewport) {
2791            if (pCB->viewports.size() != pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
2792                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2793                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2794                                  "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2795                                  ", but PSO viewportCount is %u. These counts must match.",
2796                                  pCB->viewports.size(), pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
2797            }
2798        }
2799        if (dynScissor) {
2800            if (pCB->scissors.size() != pPipeline->graphicsPipelineCI.pViewportState->scissorCount) {
2801                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2802                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2803                                  "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2804                                  ", but PSO scissorCount is %u. These counts must match.",
2805                                  pCB->scissors.size(), pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
2806            }
2807        }
2808    }
2809
2810    // Verify that any MSAA request in PSO matches sample# in bound FB
2811    // Skip the check if rasterization is disabled.
2812    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2813        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2814        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2815        if (pCB->activeRenderPass) {
2816            const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
2817            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2818            VkSampleCountFlagBits subpass_num_samples = VkSampleCountFlagBits(0);
2819            uint32_t i;
2820
2821            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
2822            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
2823                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
2824                skip_call |=
2825                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2826                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
2827                                "Render pass subpass %u: blend state attachment count %u does not match "
2828                                "subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
2829                                "must be the same at draw-time.",
2830                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
2831                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2832            }
2833
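            // Derive the subpass's sample count: 0 means no attachment seen yet, and
            // -1 is a sentinel for "attachments disagree", which can never equal a
            // valid PSO sample count and therefore forces the mismatch error below.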
2834            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2835                VkSampleCountFlagBits samples;
2836
2837                if (subpass_desc->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
2838                    continue;
2839
2840                samples = render_pass_info->pAttachments[subpass_desc->pColorAttachments[i].attachment].samples;
2841                if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0)) {
2842                    subpass_num_samples = samples;
2843                } else if (subpass_num_samples != samples) {
2844                    subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2845                    break;
2846                }
2847            }
2848            if ((subpass_desc->pDepthStencilAttachment != NULL) &&
2849                (subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
2850                const VkSampleCountFlagBits samples =
2851                        render_pass_info->pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples;
2852                if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0))
2853                    subpass_num_samples = samples;
2854                else if (subpass_num_samples != samples)
2855                    subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2856            }
2857
2858            if (((subpass_desc->colorAttachmentCount > 0) || (subpass_desc->pDepthStencilAttachment != NULL)) &&
2859                (pso_num_samples != subpass_num_samples)) {
2860                skip_call |=
2861                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2862                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2863                                "Sample count mismatch at draw time! Pipeline (0x%" PRIxLEAST64
2864                                ") uses %u samples but the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
2865                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2866                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2867            }
2868        } else {
2869            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2870                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2871                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2872                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2873        }
2874    }
2875    // Verify that PSO creation renderPass is compatible with active renderPass
2876    if (pCB->activeRenderPass) {
2877        std::string err_string;
2878        if (!verify_renderpass_compatibility(my_data, pCB->activeRenderPass->renderPass, pPipeline->graphicsPipelineCI.renderPass,
2879                                             err_string)) {
2880            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
2881            skip_call |=
2882                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2883                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2884                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
2885                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
2886                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
2887                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
2888        }
2889    }
2890    // TODO : Add more checks here
2891
2892    return skip_call;
2893}
2894
2895// Validate overall state at the time of a draw call
2896static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
2897                                           const VkPipelineBindPoint bindPoint) {
2898    bool result = false;
2899    auto const &state = pCB->lastBound[bindPoint];
2900    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2901    if (nullptr == pPipe) {
2902        result |= log_msg(
2903            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2904            DRAWSTATE_INVALID_PIPELINE, "DS",
2905            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
2906        // Early return unconditionally: every check below dereferences pPipe,
2907        // so continuing without a bound pipeline would crash the layer.
2908        return result;
2909    }
2910    // First check flag states
2911    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2912        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2913
2914    // Now complete other state checks
2915    if (state.pipelineLayout) {
2916        string errorString;
2917        auto pipelineLayout = (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) ? pPipe->graphicsPipelineCI.layout : pPipe->computePipelineCI.layout;
2918
2919        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2920        vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, std::vector<uint32_t> const *>> activeSetBindingsPairs;
2921        for (auto & setBindingPair : pPipe->active_slots) {
2922            uint32_t setIndex = setBindingPair.first;
2923            // If valid set is not bound throw an error
2924            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2925                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2926                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2927                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
2928                                  setIndex);
2929            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex],
2930                                                        pipelineLayout, setIndex, errorString)) {
2931                // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2932                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2933                result |=
2934                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2935                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2936                            "VkDescriptorSet (0x%" PRIxLEAST64
2937                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2938                            (uint64_t)setHandle, setIndex, (uint64_t)pipelineLayout, errorString.c_str());
2939            } else { // Valid set is bound and layout compatible, validate that it's updated
2940                // Pull the set node
2941                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
2942                // Save vector of all active sets to verify dynamicOffsets below
2943                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second,
2944                                                                 &state.dynamicOffsets[setIndex]));
2945                // Make sure set has been updated if it has no immutable samplers
2946                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2947                if (!pSet->IsUpdated()) {
2948                    for (auto binding : setBindingPair.second) {
2949                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
2950                            result |= log_msg(
2951                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2952                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2953                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2954                                "this will result in undefined behavior.",
2955                                (uint64_t)pSet->GetSet());
2956                        }
2957                    }
2958                }
2959            }
2960        }
2961        // For given active slots, verify any dynamic descriptors and record updated images & buffers
2962        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2963    }
2964
2965    // Check general pipeline state that needs to be validated at drawtime
2966    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2967        result |= validatePipelineDrawtimeState(my_data, state, pCB, pPipe);
2968
2969    return result;
2970}
2971
2972// Validate HW line width capabilities prior to setting requested line width.
2973static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
2974    bool skip_call = false;
2975
2976    // First check to see if the physical device supports wide lines.
2977    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
2978        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
2979                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
2980                                            "not supported/enabled so lineWidth must be 1.0f!",
2981                             lineWidth);
2982    } else {
2983        // Otherwise, make sure the width falls in the valid range.
2984        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
2985            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
2986            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
2987                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
2988                                                          "to between [%f, %f]!",
2989                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
2990                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
2991        }
2992    }
2993
2994    return skip_call;
2995}
2996
2997// Verify that create state for a pipeline is valid
2998static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> const &pPipelines,
2999                                      int pipelineIndex) {
3000    bool skipCall = false;
3001
3002    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3003
3004    // If create derivative bit is set, check that we've specified a base
3005    // pipeline correctly, and that the base pipeline was created to allow
3006    // derivatives.
3007    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3008        PIPELINE_NODE *pBasePipeline = nullptr;
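        // The XOR below enforces "exactly one": it is an error when both the base
        // pipeline handle and the base pipeline index are given, or when neither is.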
3009        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3010              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3011            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3012                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3013                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3014        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3015            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3016                skipCall |=
3017                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3018                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3019                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3020            } else {
3021                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3022            }
3023        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3024            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3025        }
3026
3027        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3028            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3029                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3030                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3031        }
3032    }
3033
3034    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3035        if (!my_data->phys_dev_properties.features.independentBlend) {
3036            if (pPipeline->attachments.size() > 1) {
3037                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3038                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3039                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3040                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3041                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3042                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3043                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3044                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3045                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3046                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3047                        skipCall |=
3048                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3049                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
3050                            "enabled, all elements of pAttachments must be identical");
3051                    }
3052                }
3053            }
3054        }
3055        if (!my_data->phys_dev_properties.features.logicOp &&
3056            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3057            skipCall |=
3058                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3059                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3060                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3061        }
3062        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3063            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3064             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3065            skipCall |=
3066                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3067                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3068                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3069        }
3070    }
3071
3072    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3073    // produces nonsense errors that confuse users. Other layers should already
3074    // emit errors for renderpass being invalid.
3075    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3076    if (renderPass &&
3077        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
3078        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3079                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3080                                                                           "is out of range for this renderpass (0..%u)",
3081                            pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
3082    }
3083
3084    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
3085                                                    my_data->shaderModuleMap)) {
3086        skipCall = true;
3087    }
3088    // Each shader's stage must be unique
3089    if (pPipeline->duplicate_shaders) {
3090        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3091            if (pPipeline->duplicate_shaders & stage) {
3092                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3093                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3094                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3095                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3096            }
3097        }
3098    }
3099    // VS is required
3100    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3101        skipCall |=
3102            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3103                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3104    }
3105    // Either both or neither TC/TE shaders should be defined
3106    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3107        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3108        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3109                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3110                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3111    }
3112    // Compute shaders should be specified independent of Gfx shaders
3113    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3114        (pPipeline->active_shaders &
3115         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3116          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3117        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3118                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3119                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3120    }
3121    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3122    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3123    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3124        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3125         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3126        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3127                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3128                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3129                                                                           "topology for tessellation pipelines");
3130    }
3131    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3132        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3133        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3134            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3135                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3136                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3137                                                                               "topology is only valid for tessellation pipelines");
3138        }
3139        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3140            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3141                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3142                                "Invalid Pipeline CreateInfo State: "
3143                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3144                                "topology used. pTessellationState must not be NULL in this case.");
3145        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3146                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
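            // 32 is the spec-required minimum for maxTessellationPatchSize; ideally
            // this would compare against the device's actual limit instead.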
3147            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3148                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3149                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3150                                                                               "topology used with patchControlPoints value %u."
3151                                                                               " patchControlPoints should be >0 and <=32.",
3152                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3153        }
3154    }
3155    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3156    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3157        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3158            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3159                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3160        }
3161    }
3162    // Viewport state must be included if rasterization is enabled.
3163    // If the viewport state is included, the viewport and scissor counts should always match.
3164    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for the shader compiler
3165    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3166        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3167        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3168            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3169                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3170                                                                           "and scissors are dynamic, the PSO must include "
3171                                                                           "viewportCount and scissorCount in pViewportState.");
3172        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3173                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3174            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3175                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3176                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3177                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3178                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3179        } else {
3180            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3181            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3182            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3183            if (!dynViewport) {
3184                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3185                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3186                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3187                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3188                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3189                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3190                                        "vkCmdSetViewport().",
3191                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3192                }
3193            }
3194            if (!dynScissor) {
3195                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3196                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3197                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3198                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3199                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3200                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3201                                        "vkCmdSetScissor().",
3202                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3203                }
3204            }
3205        }
3206    }
3207    return skipCall;
3208}
3209
3210// Free the Pipeline nodes
3211static void deletePipelines(layer_data *my_data) {
3212    if (my_data->pipelineMap.empty())
3213        return;
3214    for (auto &pipe_map_pair : my_data->pipelineMap) {
3215        delete pipe_map_pair.second;
3216    }
3217    my_data->pipelineMap.clear();
3218}
3219
3220// Block of code at start here specifically for managing/tracking DSs
3221
3222// Return Pool node ptr for specified pool or else NULL
3223DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
3224    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3225    if (pool_it == dev_data->descriptorPoolMap.end()) {
3226        return NULL;
3227    }
3228    return pool_it->second;
3229}
3230
3231// Return false if update struct is of a valid type; otherwise flag an error and return the code from the callback
3232static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3233    switch (pUpdateStruct->sType) {
3234    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3235    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3236        return false;
3237    default:
3238        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3239                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3240                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3241                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3242    }
3243}
3244
3245// Return the descriptor count for the given update struct
3246static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3247    switch (pUpdateStruct->sType) {
3248    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3249        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3250    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3251        // TODO : Need to understand this case better and make sure code is correct
3252        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3253    default:
3254        return 0;
3255    }
3256}
3257
3258// For given layout and update, return the first overall index of the layout that is updated
3259static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3260                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3261    return binding_start_index + arrayIndex;
3262}
3263// For given layout and update, return the last overall index of the layout that is updated
3264static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3265                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3266    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3267    return binding_start_index + arrayIndex + count - 1;
3268}
3269// Verify that the descriptor type in the update struct matches what's expected by the layout
3270static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3271                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3272    // First get actual type of update
3273    bool skipCall = false;
3274    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3275    switch (pUpdateStruct->sType) {
3276    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3277        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3278        break;
3279    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3280        /* no need to validate */
        return false;
3283    default:
3284        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3285                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3286                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3287                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3288    }
3289    if (!skipCall) {
3290        if (layout_type != actualType) {
3291            skipCall |= log_msg(
3292                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3293                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3294                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3295                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3296        }
3297    }
3298    return skipCall;
3299}
// TODO : Consolidate functions
3301bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3302    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3303    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3304        return false;
3305    }
3306    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3307    imgpair.subresource.aspectMask = aspectMask;
3308    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3309    if (imgsubIt == pCB->imageLayoutMap.end()) {
3310        return false;
3311    }
3312    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3313        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3314                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3315                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3316                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3317    }
3318    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3319        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3320                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3321                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3322                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3323    }
3324    node = imgsubIt->second;
3325    return true;
3326}
3327
3328bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3329    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3330        return false;
3331    }
3332    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3333    imgpair.subresource.aspectMask = aspectMask;
3334    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3335    if (imgsubIt == my_data->imageLayoutMap.end()) {
3336        return false;
3337    }
3338    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3339        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3340                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3341                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3342                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3343    }
3344    layout = imgsubIt->second.layout;
3345    return true;
3346}
3347
// Find layout(s) on the cmd buf level
3349bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3350    ImageSubresourcePair imgpair = {image, true, range};
3351    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3352    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3353    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3354    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3355    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3356    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3357        imgpair = {image, false, VkImageSubresource()};
3358        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3359        if (imgsubIt == pCB->imageLayoutMap.end())
3360            return false;
3361        node = imgsubIt->second;
3362    }
3363    return true;
3364}
3365
// Find layout(s) on the global level
3367bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3368    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3369    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3370    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3371    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3372    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3373    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3374        imgpair = {imgpair.image, false, VkImageSubresource()};
3375        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3376        if (imgsubIt == my_data->imageLayoutMap.end())
3377            return false;
3378        layout = imgsubIt->second.layout;
3379    }
3380    return true;
3381}
3382
3383bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3384    ImageSubresourcePair imgpair = {image, true, range};
3385    return FindLayout(my_data, imgpair, layout);
3386}
3387
3388bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3389    auto sub_data = my_data->imageSubresourceMap.find(image);
3390    if (sub_data == my_data->imageSubresourceMap.end())
3391        return false;
3392    auto img_node = getImageNode(my_data, image);
3393    if (!img_node)
3394        return false;
3395    bool ignoreGlobal = false;
3396    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
3397    // potential errors in this case.
3398    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
3399        ignoreGlobal = true;
3400    }
3401    for (auto imgsubpair : sub_data->second) {
3402        if (ignoreGlobal && !imgsubpair.hasSubresource)
3403            continue;
3404        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3405        if (img_data != my_data->imageLayoutMap.end()) {
3406            layouts.push_back(img_data->second.layout);
3407        }
3408    }
3409    return true;
3410}
3411
3412// Set the layout on the global level
3413void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3414    VkImage &image = imgpair.image;
3415    // TODO (mlentine): Maybe set format if new? Not used atm.
3416    my_data->imageLayoutMap[imgpair].layout = layout;
3417    // TODO (mlentine): Maybe make vector a set?
3418    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3419    if (subresource == my_data->imageSubresourceMap[image].end()) {
3420        my_data->imageSubresourceMap[image].push_back(imgpair);
3421    }
3422}
3423
3424// Set the layout on the cmdbuf level
3425void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3426    pCB->imageLayoutMap[imgpair] = node;
3427    // TODO (mlentine): Maybe make vector a set?
3428    auto subresource =
3429        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3430    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3431        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3432    }
3433}
3434
3435void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3436    // TODO (mlentine): Maybe make vector a set?
3437    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3438        pCB->imageSubresourceMap[imgpair.image].end()) {
3439        pCB->imageLayoutMap[imgpair].layout = layout;
3440    } else {
3441        // TODO (mlentine): Could be expensive and might need to be removed.
3442        assert(imgpair.hasSubresource);
3443        IMAGE_CMD_BUF_LAYOUT_NODE node;
3444        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3445            node.initialLayout = layout;
3446        }
3447        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3448    }
3449}
3450
3451template <class OBJECT, class LAYOUT>
3452void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3453    if (imgpair.subresource.aspectMask & aspectMask) {
3454        imgpair.subresource.aspectMask = aspectMask;
3455        SetLayout(pObject, imgpair, layout);
3456    }
3457}
3458
3459template <class OBJECT, class LAYOUT>
3460void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3461    ImageSubresourcePair imgpair = {image, true, range};
3462    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3463    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3464    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3465    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3466}
3467
3468template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3469    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    SetLayout(pObject, imgpair, layout);
3471}
3472
3473void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3474    auto iv_data = getImageViewData(dev_data, imageView);
3475    assert(iv_data);
3476    const VkImage &image = iv_data->image;
3477    const VkImageSubresourceRange &subRange = iv_data->subresourceRange;
3478    // TODO: Do not iterate over every possibility - consolidate where possible
3479    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3480        uint32_t level = subRange.baseMipLevel + j;
3481        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3482            uint32_t layer = subRange.baseArrayLayer + k;
3483            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3484            SetLayout(pCB, image, sub, layout);
3485        }
3486    }
3487}
3488
3489// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3490// func_str is the name of the calling function
3491// Return false if no errors occur
3492// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, const std::string &func_str) {
3494    bool skip_call = false;
3495    auto set_node = my_data->setMap.find(set);
3496    if (set_node == my_data->setMap.end()) {
3497        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3498                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3499                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3500                             (uint64_t)(set));
3501    } else {
3502        if (set_node->second->in_use.load()) {
3503            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3504                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3505                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
3506                                 func_str.c_str(), (uint64_t)(set));
3507        }
3508    }
3509    return skip_call;
3510}
3511
3512// Remove set from setMap and delete the set
3513static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3514    dev_data->setMap.erase(descriptor_set->GetSet());
3515    delete descriptor_set;
3516}
3517// Free all DS Pools including their Sets & related sub-structs
3518// NOTE : Calls to this function should be wrapped in mutex
3519static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
3521        return;
3522    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
3524        for (auto ds : (*ii).second->sets) {
3525            freeDescriptorSet(my_data, ds);
3526        }
3527        (*ii).second->sets.clear();
3528    }
3529    my_data->descriptorPoolMap.clear();
3530}
3531
3532static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3533                                VkDescriptorPoolResetFlags flags) {
3534    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
3535    if (!pPool) {
3536        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
3537                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
3538                "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
3539    } else {
3540        // TODO: validate flags
        // For each set in this pool, remove it from setMap and free its cvdescriptorset::DescriptorSet
3542        for (auto ds : pPool->sets) {
3543            freeDescriptorSet(my_data, ds);
3544        }
3545        pPool->sets.clear();
3546        // Reset available count for each type and available sets for this pool
3547        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3548            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3549        }
3550        pPool->availableSets = pPool->maxSets;
3551    }
3552}
3553
3554// For given CB object, fetch associated CB Node from map
3555static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3556    auto it = my_data->commandBufferMap.find(cb);
3557    if (it == my_data->commandBufferMap.end()) {
3558        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3559                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3560                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3561        return NULL;
3562    }
3563    return it->second;
3564}
3565// Free all CB Nodes
3566// NOTE : Calls to this function should be wrapped in mutex
3567static void deleteCommandBuffers(layer_data *my_data) {
3568    if (my_data->commandBufferMap.empty()) {
3569        return;
3570    }
3571    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3572        delete (*ii).second;
3573    }
3574    my_data->commandBufferMap.clear();
3575}
3576
3577static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3578    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3579                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3580                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3581}
3582
3583bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3584    if (!pCB->activeRenderPass)
3585        return false;
3586    bool skip_call = false;
3587    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3588        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3589        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3590                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3591                             "Commands cannot be called in a subpass using secondary command buffers.");
3592    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3593        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3594                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3595                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3596    }
3597    return skip_call;
3598}
3599
3600static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3601    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3602        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3603                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3604                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3605    return false;
3606}
3607
3608static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3609    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3610        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3611                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3612                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3613    return false;
3614}
3615
3616static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3617    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3618        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3619                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3620                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3621    return false;
3622}
3623
3624// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3625//  in the recording state or if there's an issue with the Cmd ordering
3626static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3627    bool skipCall = false;
3628    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3629    if (pPool) {
3630        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3631        switch (cmd) {
3632        case CMD_BINDPIPELINE:
3633        case CMD_BINDPIPELINEDELTA:
3634        case CMD_BINDDESCRIPTORSETS:
3635        case CMD_FILLBUFFER:
3636        case CMD_CLEARCOLORIMAGE:
3637        case CMD_SETEVENT:
3638        case CMD_RESETEVENT:
3639        case CMD_WAITEVENTS:
3640        case CMD_BEGINQUERY:
3641        case CMD_ENDQUERY:
3642        case CMD_RESETQUERYPOOL:
3643        case CMD_COPYQUERYPOOLRESULTS:
3644        case CMD_WRITETIMESTAMP:
3645            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3646            break;
3647        case CMD_SETVIEWPORTSTATE:
3648        case CMD_SETSCISSORSTATE:
3649        case CMD_SETLINEWIDTHSTATE:
3650        case CMD_SETDEPTHBIASSTATE:
3651        case CMD_SETBLENDSTATE:
3652        case CMD_SETDEPTHBOUNDSSTATE:
3653        case CMD_SETSTENCILREADMASKSTATE:
3654        case CMD_SETSTENCILWRITEMASKSTATE:
3655        case CMD_SETSTENCILREFERENCESTATE:
3656        case CMD_BINDINDEXBUFFER:
3657        case CMD_BINDVERTEXBUFFER:
3658        case CMD_DRAW:
3659        case CMD_DRAWINDEXED:
3660        case CMD_DRAWINDIRECT:
3661        case CMD_DRAWINDEXEDINDIRECT:
3662        case CMD_BLITIMAGE:
3663        case CMD_CLEARATTACHMENTS:
3664        case CMD_CLEARDEPTHSTENCILIMAGE:
3665        case CMD_RESOLVEIMAGE:
3666        case CMD_BEGINRENDERPASS:
3667        case CMD_NEXTSUBPASS:
3668        case CMD_ENDRENDERPASS:
3669            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3670            break;
3671        case CMD_DISPATCH:
3672        case CMD_DISPATCHINDIRECT:
3673            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3674            break;
3675        case CMD_COPYBUFFER:
3676        case CMD_COPYIMAGE:
3677        case CMD_COPYBUFFERTOIMAGE:
3678        case CMD_COPYIMAGETOBUFFER:
3679        case CMD_CLONEIMAGEDATA:
3680        case CMD_UPDATEBUFFER:
3681        case CMD_PIPELINEBARRIER:
3682        case CMD_EXECUTECOMMANDS:
3683        case CMD_END:
3684            break;
3685        default:
3686            break;
3687        }
3688    }
3689    if (pCB->state != CB_RECORDING) {
3690        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3691    } else {
3692        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
3693        CMD_NODE cmdNode = {};
        // Initialize the cmd node and append it to the end of the CB's command list
3695        cmdNode.cmdNumber = ++pCB->numCmds;
3696        cmdNode.type = cmd;
3697        pCB->cmds.push_back(cmdNode);
3698    }
3699    return skipCall;
3700}
3701// Reset the command buffer state
3702//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3703static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3704    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3705    if (pCB) {
3706        pCB->in_use.store(0);
3707        pCB->cmds.clear();
3708        // Reset CB state (note that createInfo is not cleared)
3709        pCB->commandBuffer = cb;
3710        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3711        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3712        pCB->numCmds = 0;
3713        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3714        pCB->state = CB_NEW;
3715        pCB->submitCount = 0;
3716        pCB->status = 0;
3717        pCB->viewports.clear();
3718        pCB->scissors.clear();
3719
3720        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3721            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
3722            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
3723                set->RemoveBoundCommandBuffer(pCB);
3724            }
3725            pCB->lastBound[i].reset();
3726        }
3727
3728        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3729        pCB->activeRenderPass = nullptr;
3730        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3731        pCB->activeSubpass = 0;
3732        pCB->destroyedSets.clear();
3733        pCB->updatedSets.clear();
3734        pCB->destroyedFramebuffers.clear();
3735        pCB->waitedEvents.clear();
3736        pCB->events.clear();
3737        pCB->writeEventsBeforeWait.clear();
3738        pCB->waitedEventsBeforeQueryReset.clear();
3739        pCB->queryToStateMap.clear();
3740        pCB->activeQueries.clear();
3741        pCB->startedQueries.clear();
3742        pCB->imageSubresourceMap.clear();
3743        pCB->imageLayoutMap.clear();
3744        pCB->eventToStageMap.clear();
3745        pCB->drawData.clear();
3746        pCB->currentDrawData.buffers.clear();
3747        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3748        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3749        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3750            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3751        }
3752        pCB->secondaryCommandBuffers.clear();
3753        pCB->updateImages.clear();
3754        pCB->updateBuffers.clear();
3755        clear_cmd_buf_and_mem_references(dev_data, pCB);
3756        pCB->eventUpdates.clear();
3757        pCB->queryUpdates.clear();
3758
3759        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3760        for (auto framebuffer : pCB->framebuffers) {
3761            auto fbNode = getFramebuffer(dev_data, framebuffer);
3762            if (fbNode)
3763                fbNode->referencingCmdBuffers.erase(pCB->commandBuffer);
3764        }
3765        pCB->framebuffers.clear();
3766        pCB->activeFramebuffer = VK_NULL_HANDLE;
3767    }
3768}
3769
3770// Set PSO-related status bits for CB, including dynamic state set via PSO
3771static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
3772    // Account for any dynamic state not set via this PSO
3773    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3774        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
3775        pCB->status = CBSTATUS_ALL;
3776    } else {
        // Start with all state bits set, clear the bits for any state the PSO
        // declares as dynamic, then OR the remaining (statically-set) bits
        // into the CB status mask
3780        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
3781        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3782            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3783            case VK_DYNAMIC_STATE_VIEWPORT:
3784                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
3785                break;
3786            case VK_DYNAMIC_STATE_SCISSOR:
3787                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
3788                break;
3789            case VK_DYNAMIC_STATE_LINE_WIDTH:
3790                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3791                break;
3792            case VK_DYNAMIC_STATE_DEPTH_BIAS:
3793                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3794                break;
3795            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3796                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3797                break;
3798            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3799                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3800                break;
3801            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3802                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3803                break;
3804            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3805                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3806                break;
3807            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3808                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3809                break;
3810            default:
3811                // TODO : Flag error here
3812                break;
3813            }
3814        }
3815        pCB->status |= psoDynStateMask;
3816    }
3817}
3818
3819// Print the last bound Gfx Pipeline
3820static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
3821    bool skipCall = false;
3822    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3823    if (pCB) {
3824        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (pPipeTrav) {
3828            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3829                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
3830                                vk_print_vkgraphicspipelinecreateinfo(
3831                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
3832                                    .c_str());
3833        }
3834    }
3835    return skipCall;
3836}
3837
3838static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
3839    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && !pCB->cmds.empty()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
        const vector<CMD_NODE> &cmds = pCB->cmds;
3844        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
3845            // TODO : Need to pass cb as srcObj here
3846            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
3847                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
3848        }
    }
3852}
3853
3854static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
3855    bool skipCall = false;
3856    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
3857        return skipCall;
3858    }
3859    skipCall |= printPipeline(my_data, cb);
3860    return skipCall;
3861}
3862
3863// Flags validation error if the associated call is made inside a render pass. The apiName
3864// routine should ONLY be called outside a render pass.
3865static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3866    bool inside = false;
3867    if (pCB->activeRenderPass) {
3868        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3869                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
3870                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
3871                         (uint64_t)pCB->activeRenderPass->renderPass);
3872    }
3873    return inside;
3874}
3875
3876// Flags validation error if the associated call is made outside a render pass. The apiName
3877// routine should ONLY be called inside a render pass.
3878static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3879    bool outside = false;
3880    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3881        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3882         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3883        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3884                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
3885                          "%s: This call must be issued inside an active render pass.", apiName);
3886    }
3887    return outside;
3888}
3889
static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}
3895
3896VKAPI_ATTR VkResult VKAPI_CALL
3897CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
3898    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3899
3900    assert(chain_info->u.pLayerInfo);
3901    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3902    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3903    if (fpCreateInstance == NULL)
3904        return VK_ERROR_INITIALIZATION_FAILED;
3905
3906    // Advance the link info for the next element on the chain
3907    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3908
3909    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3910    if (result != VK_SUCCESS)
3911        return result;
3912
3913    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
3914    instance_data->instance = *pInstance;
3915    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
3916    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
3917
3918    instance_data->report_data =
3919        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
3920                                     pCreateInfo->ppEnabledExtensionNames);
3921
3922    init_core_validation(instance_data, pAllocator);
3923
3924    ValidateLayerOrdering(*pCreateInfo);
3925
3926    return result;
3927}
3928
/* hook DestroyInstance to remove this instance's layer_data map entry */
3930VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3931    // TODOSC : Shouldn't need any customization here
3932    dispatch_key key = get_dispatch_key(instance);
3933    // TBD: Need any locking this early, in case this function is called at the
3934    // same time by more than one thread?
3935    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
3936    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
3937    pTable->DestroyInstance(instance, pAllocator);
3938
3939    std::lock_guard<std::mutex> lock(global_lock);
3940    // Clean up logging callback, if any
3941    while (my_data->logging_callback.size() > 0) {
3942        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
3943        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
3944        my_data->logging_callback.pop_back();
3945    }
3946
3947    layer_debug_report_destroy_instance(my_data->report_data);
3948    delete my_data->instance_dispatch_table;
3949    layer_data_map.erase(key);
3950}
3951
3952static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
3954    // TBD: Need any locking, in case this function is called at the same time
3955    // by more than one thread?
3956    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
3957    dev_data->device_extensions.wsi_enabled = false;
3958
3959    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
3960    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
3961    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
3962    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
3963    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
3964    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
3965    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
3966
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3968        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
3969            dev_data->device_extensions.wsi_enabled = true;
3970    }
3971}
3972
3973VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3974                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
3975    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
3976    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3977
3978    assert(chain_info->u.pLayerInfo);
3979    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3980    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
3981    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
3982    if (fpCreateDevice == NULL) {
3983        return VK_ERROR_INITIALIZATION_FAILED;
3984    }
3985
3986    // Advance the link info for the next element on the chain
3987    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3988
3989    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
3990    if (result != VK_SUCCESS) {
3991        return result;
3992    }
3993
3994    std::unique_lock<std::mutex> lock(global_lock);
3995    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
3996
3997    // Setup device dispatch table
3998    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
3999    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4000    my_device_data->device = *pDevice;
4001
4002    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4003    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4004    // Get physical device limits for this device
4005    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4006    uint32_t count;
4007    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4008    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4009    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, my_device_data->phys_dev_properties.queue_family_properties.data());
4011    // TODO: device limits should make sure these are compatible
4012    if (pCreateInfo->pEnabledFeatures) {
4013        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
4014    } else {
4015        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4016    }
4017    // Store physical device mem limits into device layer_data struct
4018    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4019    lock.unlock();
4020
4021    ValidateLayerOrdering(*pCreateInfo);
4022
4023    return result;
4024}
4025
4026// prototype
4027static void deleteRenderPasses(layer_data *);
4028VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4029    // TODOSC : Shouldn't need any customization here
4030    dispatch_key key = get_dispatch_key(device);
4031    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4032    // Free all the memory
4033    std::unique_lock<std::mutex> lock(global_lock);
4034    deletePipelines(dev_data);
4035    deleteRenderPasses(dev_data);
4036    deleteCommandBuffers(dev_data);
4037    // This will also delete all sets in the pool & remove them from setMap
4038    deletePools(dev_data);
4039    // All sets should be removed
4040    assert(dev_data->setMap.empty());
4041    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4042        delete del_layout.second;
4043    }
4044    dev_data->descriptorSetLayoutMap.clear();
4045    dev_data->imageViewMap.clear();
4046    dev_data->imageMap.clear();
4047    dev_data->imageSubresourceMap.clear();
4048    dev_data->imageLayoutMap.clear();
4049    dev_data->bufferViewMap.clear();
4050    dev_data->bufferMap.clear();
4051    // Queues persist until device is destroyed
4052    dev_data->queueMap.clear();
4053    lock.unlock();
4054#if MTMERGESOURCE
4055    bool skipCall = false;
4056    lock.lock();
4057    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4058            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4059    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4060            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4061    print_mem_list(dev_data);
4062    printCBList(dev_data);
4063    // Report any memory leaks
4064    DEVICE_MEM_INFO *pInfo = NULL;
4065    if (!dev_data->memObjMap.empty()) {
4066        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4067            pInfo = (*ii).second.get();
4068            if (pInfo->allocInfo.allocationSize != 0) {
4069                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4070                skipCall |=
4071                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4072                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4073                            "MEM", "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4074                                   "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4075                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4076            }
4077        }
4078    }
4079    layer_debug_report_destroy_device(device);
4080    lock.unlock();
4081
4082#if DISPATCH_MAP_DEBUG
4083    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4084#endif
4085    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4086    if (!skipCall) {
4087        pDisp->DestroyDevice(device, pAllocator);
4088    }
4089#else
4090    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4091#endif
4092    delete dev_data->device_dispatch_table;
4093    layer_data_map.erase(key);
4094}
4095
4096static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4097
// This validates that the initial layout specified in the command buffer for
// the IMAGE is the same as the global IMAGE layout
4101static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4102    bool skip_call = false;
4103    for (auto cb_image_data : pCB->imageLayoutMap) {
4104        VkImageLayout imageLayout;
4105        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4106            skip_call |=
4107                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4108                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4109                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4110        } else {
4111            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4112                // TODO: Set memory invalid which is in mem_tracker currently
4113            } else if (imageLayout != cb_image_data.second.initialLayout) {
4114                if (cb_image_data.first.hasSubresource) {
4115                    skip_call |= log_msg(
4116                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4117                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4118                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4119                        "with layout %s when first use is %s.",
4120                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4121                                cb_image_data.first.subresource.arrayLayer,
4122                                cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4123                        string_VkImageLayout(cb_image_data.second.initialLayout));
4124                } else {
4125                    skip_call |= log_msg(
4126                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4127                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4128                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4129                        "first use is %s.",
4130                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4131                        string_VkImageLayout(cb_image_data.second.initialLayout));
4132                }
4133            }
4134            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4135        }
4136    }
4137    return skip_call;
4138}
4139
4140// Track which resources are in-flight by atomically incrementing their "in_use" count
4141static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB, std::vector<VkSemaphore> const &semaphores) {
4142    bool skip_call = false;
4143    for (auto drawDataElement : pCB->drawData) {
4144        for (auto buffer : drawDataElement.buffers) {
4145            auto buffer_node = getBufferNode(my_data, buffer);
4146            if (!buffer_node) {
4147                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4148                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4149                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4150            } else {
4151                buffer_node->in_use.fetch_add(1);
4152            }
4153        }
4154    }
4155    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4156        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4157            if (!my_data->setMap.count(set->GetSet())) {
4158                skip_call |=
4159                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(set->GetSet()), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
                            "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set->GetSet()));
4162            } else {
4163                set->in_use.fetch_add(1);
4164            }
4165        }
4166    }
4167    for (auto semaphore : semaphores) {
4168        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4169        if (semaphoreNode == my_data->semaphoreMap.end()) {
4170            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4172                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4173                        "Cannot submit cmd buffer using deleted semaphore 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(semaphore));
4174        } else {
4175            semaphoreNode->second.in_use.fetch_add(1);
4176        }
4177    }
4178    for (auto event : pCB->events) {
4179        auto eventNode = my_data->eventMap.find(event);
4180        if (eventNode == my_data->eventMap.end()) {
4181            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4183                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4184                        "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event));
4185        } else {
4186            eventNode->second.in_use.fetch_add(1);
4187        }
4188    }
4189    for (auto event : pCB->writeEventsBeforeWait) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode != my_data->eventMap.end()) {
            eventNode->second.write_in_use++;
        }
4192    }
4193    return skip_call;
4194}
4195
4196// Note: This function assumes that the global lock is held by the calling
4197// thread.
4198static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4199    bool skip_call = false;
4200    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4201    if (pCB) {
4202        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4203            for (auto event : queryEventsPair.second) {
4204                if (my_data->eventMap[event].needsSignaled) {
4205                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4207                                         "Cannot get query results on queryPool 0x%" PRIx64
4208                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4209                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4210                }
4211            }
4212        }
4213    }
4214    return skip_call;
4215}
4216// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4217static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4218    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4219    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4220    pCB->in_use.fetch_sub(1);
4221    if (!pCB->in_use.load()) {
4222        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4223    }
4224}
4225
4226static void decrementResources(layer_data *my_data, CB_SUBMISSION *submission) {
4227    GLOBAL_CB_NODE *pCB = getCBNode(my_data, submission->cb);
4228    for (auto drawDataElement : pCB->drawData) {
4229        for (auto buffer : drawDataElement.buffers) {
4230            auto buffer_node = getBufferNode(my_data, buffer);
4231            if (buffer_node) {
4232                buffer_node->in_use.fetch_sub(1);
4233            }
4234        }
4235    }
4236    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4237        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4238            set->in_use.fetch_sub(1);
4239        }
4240    }
4241    for (auto semaphore : submission->semaphores) {
4242        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4243        if (semaphoreNode != my_data->semaphoreMap.end()) {
4244            semaphoreNode->second.in_use.fetch_sub(1);
4245        }
4246    }
4247    for (auto event : pCB->events) {
4248        auto eventNode = my_data->eventMap.find(event);
4249        if (eventNode != my_data->eventMap.end()) {
4250            eventNode->second.in_use.fetch_sub(1);
4251        }
4252    }
4253    for (auto event : pCB->writeEventsBeforeWait) {
4254        auto eventNode = my_data->eventMap.find(event);
4255        if (eventNode != my_data->eventMap.end()) {
4256            eventNode->second.write_in_use--;
4257        }
4258    }
4259    for (auto queryStatePair : pCB->queryToStateMap) {
4260        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4261    }
4262    for (auto eventStagePair : pCB->eventToStageMap) {
4263        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4264    }
4265}
// For fenceCount fences in pFences, mark each fence retired and call
//  decrementResources for all priorFences and submissions associated with the fence.
4268static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4269    bool skip_call = false;
4270    std::vector<std::pair<VkFence, FENCE_NODE *>> fence_pairs;
4271    for (uint32_t i = 0; i < fenceCount; ++i) {
4272        auto pFence = getFenceNode(my_data, pFences[i]);
4273        if (!pFence || pFence->state != FENCE_INFLIGHT)
4274            continue;
4275
4276        fence_pairs.emplace_back(pFences[i], pFence);
4277        pFence->state = FENCE_RETIRED;
4278
4279        decrementResources(my_data, static_cast<uint32_t>(pFence->priorFences.size()),
4280                           pFence->priorFences.data());
4281        for (auto & submission : pFence->submissions) {
4282            decrementResources(my_data, &submission);
4283            skip_call |= cleanInFlightCmdBuffer(my_data, submission.cb);
4284            removeInFlightCmdBuffer(my_data, submission.cb);
4285        }
4286        pFence->submissions.clear();
4287        pFence->priorFences.clear();
4288    }
4289    for (auto fence_pair : fence_pairs) {
4290        for (auto queue : fence_pair.second->queues) {
4291            auto pQueue = getQueueNode(my_data, queue);
4292            if (pQueue) {
4293                auto last_fence_data =
4294                    std::find(pQueue->lastFences.begin(), pQueue->lastFences.end(), fence_pair.first);
4295                if (last_fence_data != pQueue->lastFences.end())
4296                    pQueue->lastFences.erase(last_fence_data);
4297            }
4298        }
        for (auto &fence_data : my_data->fenceMap) {
            auto prior_fence_data =
                std::find(fence_data.second.priorFences.begin(), fence_data.second.priorFences.end(), fence_pair.first);
            if (prior_fence_data != fence_data.second.priorFences.end())
                fence_data.second.priorFences.erase(prior_fence_data);
        }
4305    }
4306    return skip_call;
4307}
4308// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
4309static bool decrementResources(layer_data *my_data, VkQueue queue) {
4310    bool skip_call = false;
4311    auto queue_data = my_data->queueMap.find(queue);
4312    if (queue_data != my_data->queueMap.end()) {
4313        for (auto & submission : queue_data->second.untrackedSubmissions) {
4314            decrementResources(my_data, &submission);
4315            skip_call |= cleanInFlightCmdBuffer(my_data, submission.cb);
4316            removeInFlightCmdBuffer(my_data, submission.cb);
4317        }
4318        queue_data->second.untrackedSubmissions.clear();
4319        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4320                                        queue_data->second.lastFences.data());
4321    }
4322    return skip_call;
4323}

// This function merges command buffer tracking between queues when there is a semaphore dependency
// between them (see below for details as to how tracking works). When this happens, the prior
// fences from the signaling queue are merged into the waiting queue, as are any untracked command
// buffers.
static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
    if (queue == other_queue) {
        return;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    auto other_queue_data = dev_data->queueMap.find(other_queue);
    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
        return;
    }
    for (auto fenceInner : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(fenceInner);
        auto fence_node = dev_data->fenceMap.find(fenceInner);
        if (fence_node != dev_data->fenceMap.end()) {
            // The merged fence now also lives on the waiting queue's lastFences list, so
            // record the waiting queue (the signaling queue was already recorded at
            // SubmitFence time) so retirement can clean up both lists.
            fence_node->second.queues.insert(queue_data->first);
        }
    }
    // TODO: Stealing the untracked CBs out of the signaling queue isn't really
    // correct. A subsequent submission + wait, or a QWI on that queue, or
    // another semaphore dependency to a third queue may /all/ provide
    // suitable proof that the work we're stealing here has completed on the
    // device, but we've lost that information by moving the tracking between
    // queues.
    if (fence != VK_NULL_HANDLE) {
        auto fence_data = dev_data->fenceMap.find(fence);
        if (fence_data == dev_data->fenceMap.end()) {
            return;
        }
        for (auto cmdbuffer : other_queue_data->second.untrackedSubmissions) {
            fence_data->second.submissions.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedSubmissions.clear();
    } else {
        for (auto cmdbuffer : other_queue_data->second.untrackedSubmissions) {
            queue_data->second.untrackedSubmissions.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedSubmissions.clear();
    }
    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
    }
    for (auto queryStatePair : other_queue_data->second.queryToStateMap) {
        queue_data->second.queryToStateMap[queryStatePair.first] = queryStatePair.second;
    }
}
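
// Hedged illustration of a submission pattern that triggers the merge above.
// This is not part of the layer; the handles (queueA, queueB, sem, fence,
// stageMask) are hypothetical.
//
//   VkSubmitInfo s1 = {};                       // work that signals `sem` on queueA
//   s1.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//   s1.signalSemaphoreCount = 1;
//   s1.pSignalSemaphores = &sem;
//   vkQueueSubmit(queueA, 1, &s1, VK_NULL_HANDLE);
//
//   VkSubmitInfo s2 = {};                       // work on queueB that waits on `sem`
//   s2.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//   s2.waitSemaphoreCount = 1;
//   s2.pWaitSemaphores = &sem;
//   s2.pWaitDstStageMask = &stageMask;
//   vkQueueSubmit(queueB, 1, &s2, fence);
//
// At the second submit, updateTrackedCommandBuffers(dev_data, queueB, queueA, fence)
// folds queueA's lastFences and untracked submissions into queueB's tracking.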

// This is the core function for tracking command buffers. There are two primary ways command
// buffers are tracked. When submitted, they are stored in the command buffer list associated
// with a fence, or in the untracked command buffer list associated with the queue if no fence is used.
// Each queue also stores the last fence that was submitted onto the queue. This allows us to
// create a linked list of fences and their associated command buffers, so if one fence is
// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
// waited on (either via a queue, device or fence), we free the cmd buffers for that fence and
// recursively call with the prior fences.
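//
// Hedged example of the linked-list behavior described above (all handles hypothetical):
//
//   vkQueueSubmit(queue, 1, &submitA, fenceA);         // fenceA.submissions = {A's CBs}
//   vkQueueSubmit(queue, 1, &submitB, VK_NULL_HANDLE); // B's CBs -> untrackedSubmissions
//   vkQueueSubmit(queue, 1, &submitC, fenceC);         // fenceC.priorFences = {fenceA},
//                                                      // fenceC.submissions = {B's, C's CBs}
//   vkWaitForFences(device, 1, &fenceC, VK_TRUE, UINT64_MAX);
//     // retires fenceC, then recursively retires fenceA via priorFences,
//     // releasing the in-flight state of all three submissions' command buffers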


// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence) {
    assert(pFence->priorFences.empty());
    assert(pFence->submissions.empty());

    std::swap(pFence->priorFences, pQueue->lastFences);
    std::swap(pFence->submissions, pQueue->untrackedSubmissions);

    pFence->queues.insert(pQueue->queue);
    pFence->state = FENCE_INFLIGHT;

    pQueue->lastFences.push_back(pFence->fence);
}

static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                       VkFence fence) {
    auto queue_data = my_data->queueMap.find(queue);
    if (queue_data != my_data->queueMap.end()) {
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                // Add cmdBuffers to the global in-flight set and increment their use counts
                GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]);
                if (!pCB) {
                    continue;
                }
                for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
                    GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer);
                    pSubCB->in_use.fetch_add(1);
                }
                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
                pCB->in_use.fetch_add(1);
            }
        }
    }
}

static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skipCall = false;
    // Validate that a ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted 0x%" PRIxLEAST64 " times.",
                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
    }
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform the app of the reason the CB is invalid
            bool causeReported = false;
            if (!pCB->destroyedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->destroyedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->updatedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->updatedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->destroyedFramebuffers.empty()) {
                std::stringstream fb_string;
                for (auto fb : pCB->destroyedFramebuffers)
                    fb_string << " " << fb;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because it had the following "
                            "referenced framebuffers destroyed: %s",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
                causeReported = true;
            }
            // TODO : This is defensive programming to make sure an error is
            //  flagged if we hit this INVALID cmd buffer case and none of the
            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
            if (!causeReported) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should be improved to report the exact cause.",
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
            }
        } else { // Flag an error for using a CB without vkEndCommandBuffer() having been called
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skipCall;
}

static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, std::vector<VkSemaphore> const &semaphores) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skipCall = validateAndIncrementResources(dev_data, pCB, semaphores);
    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer], semaphores);
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
                            " but that buffer has subsequently been bound to "
                            "primary cmd buffer 0x%" PRIxLEAST64
                            " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                            reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                            reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
            }
        }
    }
    skipCall |= validateCommandBufferState(dev_data, pCB);
    // If USAGE_SIMULTANEOUS_USE_BIT is not set, then the CB cannot already be executing on the device
    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
    return skipCall;
}

static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
    bool skipCall = false;

    if (pFence) {
        if (pFence->state == FENCE_INFLIGHT) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
                                reinterpret_cast<uint64_t &>(pFence->fence));
        }
    }

    return skipCall;
}
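
// Hedged examples of the two error cases above (handles hypothetical):
//
//   vkQueueSubmit(queue, 1, &submit, fence);   // fence is now FENCE_INFLIGHT
//   vkQueueSubmit(queue, 1, &submit, fence);   // error: fence already in use
//
//   vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // fence is FENCE_RETIRED
//   vkQueueSubmit(queue, 1, &submit, fence);   // error: call vkResetFences() first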


VKAPI_ATTR VkResult VKAPI_CALL
QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);

    auto pQueue = getQueueNode(dev_data, queue);
    auto pFence = getFenceNode(dev_data, fence);
    skipCall |= ValidateFenceForSubmit(dev_data, pFence);

    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // TODO : Review these old print functions and clean up as appropriate
    print_mem_list(dev_data);
    printCBList(dev_data);

    // Mark the fence in-use.
    if (pFence) {
        SubmitFence(pQueue, pFence);
    }

    // If a fence is supplied, all the command buffers for this call will be
    // delimited by that fence. Otherwise, they go in the untracked portion of
    // the queue, and may end up being delimited by a fence supplied in a
    // subsequent submission.
    auto &submitTarget = pFence ? pFence->submissions : pQueue->untrackedSubmissions;

    // Now verify each individual submit
    std::unordered_set<VkQueue> processed_other_queues;
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<VkSemaphore> semaphoreList;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            semaphoreList.push_back(semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    pSemaphore->signaled = false;
                } else {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
                VkQueue other_queue = pSemaphore->queue;
                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
                    processed_other_queues.insert(other_queue);
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                semaphoreList.push_back(semaphore);
                if (pSemaphore->signaled) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
                } else {
                    pSemaphore->signaled = true;
                    pSemaphore->queue = queue;
                }
            }
        }

        // TODO: just add one submission per VkSubmitInfo!
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            skipCall |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
            if (pCBNode) {
                submitTarget.emplace_back(pCBNode->commandBuffer, semaphoreList);
                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
                    submitTarget.emplace_back(secondaryCmdBuffer, semaphoreList);
                }

                pCBNode->submitCount++; // increment submit count
                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode, semaphoreList);
                // Call submit-time functions to validate/update state
                for (auto &function : pCBNode->validate_functions) {
                    skipCall |= function();
                }
                for (auto &function : pCBNode->eventUpdates) {
                    skipCall |= function(queue);
                }
                for (auto &function : pCBNode->queryUpdates) {
                    skipCall |= function(queue);
                }
            }
        }
    }
    markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence);
    lock.unlock();
    if (!skipCall)
        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // TODO : Track allocations and overall size here
    std::lock_guard<std::mutex> lock(global_lock);
    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
    print_mem_list(my_data);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    // From the spec: A memory object is freed by calling vkFreeMemory() when it is no longer needed.
    // Before freeing a memory object, an application must ensure the memory object is no longer
    // in use by the device (for example, by command buffers queued for execution). The memory need
    // not yet be unbound from all images and buffers, but any further use of those images or
    // buffers (on host or device) for anything other than destroying those objects will result in
    // undefined behavior.

    std::unique_lock<std::mutex> lock(global_lock);
    freeMemObjInfo(my_data, device, mem, false);
    print_mem_list(my_data);
    printCBList(my_data);
    lock.unlock();
    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
}
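
// Hedged usage sketch of the rule quoted above (handles hypothetical):
//
//   vkQueueSubmit(queue, 1, &submit, fence);                  // GPU may still read `mem`
//   vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // ensure the device is done
//   vkFreeMemory(device, mem, nullptr);                       // now safe to free
//
// Freeing before the wait completes is the kind of misuse the bookkeeping in
// freeMemObjInfo above is meant to catch.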

static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skipCall = false;

    if (size == 0) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                           "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call VkMapMemory on an object that is already mapped
        if (mem_info->memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within the object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                                    offset, mem_info->allocInfo.allocationSize, mem_info->allocInfo.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->allocInfo.allocationSize) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
                            size + offset, mem_info->allocInfo.allocationSize);
            }
        }
    }
    return skipCall;
}
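
// Hedged examples of mappings rejected above, assuming a hypothetical `mem`
// with allocationSize of 256 bytes:
//
//   vkMapMemory(device, mem, 0, 0, 0, &ptr);                 // error: size zero
//   vkMapMemory(device, mem, 0, 128, 0, &ptr);               // OK
//   vkMapMemory(device, mem, 0, 128, 0, &ptr);               // error: already mapped
//   vkMapMemory(device, mem, 256, VK_WHOLE_SIZE, 0, &ptr);   // error: offset >= allocationSize
//   vkMapMemory(device, mem, 192, 128, 0, &ptr);             // error: offset + size > allocationSize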

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        mem_info->memRange.offset = offset;
        mem_info->memRange.size = size;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skipCall = false;
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        if (!mem_info->memRange.size) {
            // Valid Usage: memory must currently be mapped
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                               "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
        }
        mem_info->memRange.size = 0;
        if (mem_info->pData) {
            free(mem_info->pData);
            mem_info->pData = nullptr;
        }
    }
    return skipCall;
}

static char NoncoherentMemoryFillValue = 0xb;

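// Layout sketch of the shadow allocation built below for non-coherent memory
// (derived from the code; byte sizes for a mapping of `size`):
//
//   pData:  [ size/2 guard ][ size bytes returned to the app ][ size/2 guard ]
//
// The whole 2 * size block is first filled with NoncoherentMemoryFillValue,
// presumably so that writes straying outside the returned region can be
// noticed by later checks when the guard bytes change.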
static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
    auto mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->pDriverData = *ppData;
        uint32_t index = mem_info->allocInfo.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_info->pData = nullptr;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->allocInfo.allocationSize;
            }
            size_t convSize = (size_t)(size);
            mem_info->pData = malloc(2 * convSize);
            memset(mem_info->pData, NoncoherentMemoryFillValue, 2 * convSize);
            *ppData = static_cast<char *>(mem_info->pData) + (convSize / 2);
        }
    }
}
// Verify that the state of a fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled, and
//  it should have been submitted on a queue or during acquire next image.
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skipCall = false;

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        if (pFence->state == FENCE_UNSIGNALED) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                "acquire next image.",
                                apiCall, reinterpret_cast<uint64_t &>(fence));
        }
    }
    return skipCall;
}

VKAPI_ATTR VkResult VKAPI_CALL
WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    // Verify fence status of submitted fences
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        lock.lock();
        // When we know that all fences are complete we can clean/remove their CBs
        if (waitAll || fenceCount == 1) {
            skip_call |= decrementResources(dev_data, fenceCount, pFences);
        }
        // NOTE : The alternate case not handled here is when only some fences have completed.
        //  In that case, for the app to guarantee which fences completed it will have to call
        //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
        lock.unlock();
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
    lock.unlock();

    if (skip_call)
        return result;

    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    lock.lock();
    if (result == VK_SUCCESS) {
        skip_call |= decrementResources(dev_data, 1, &fence);
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                          VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
        pQNode->queue = *pQueue;
        pQNode->device = device;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;
    // Hold global_lock while touching the tracking maps, matching DeviceWaitIdle below
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= decrementResources(dev_data, queue);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto queue : dev_data->queues) {
        skip_call |= decrementResources(dev_data, queue);
    }
    dev_data->globalInFlightCmdBuffers.clear();
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto fence_pair = dev_data->fenceMap.find(fence);
    if (fence_pair != dev_data->fenceMap.end()) {
        if (fence_pair->second.state == FENCE_INFLIGHT) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                "Fence 0x%" PRIx64 " is in use.", (uint64_t)(fence));
        }
        dev_data->fenceMap.erase(fence_pair);
    }
    lock.unlock();

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    auto item = dev_data->semaphoreMap.find(semaphore);
    if (item != dev_data->semaphoreMap.end()) {
        if (item->second.in_use.load()) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                    "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
        }
        dev_data->semaphoreMap.erase(semaphore);
    }
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_data = dev_data->eventMap.find(event);
    if (event_data != dev_data->eventMap.end()) {
        if (event_data->second.in_use.load()) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
        }
        dev_data->eventMap.erase(event_data);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                   VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        auto pCB = getCBNode(dev_data, cmdBuffer);
        for (auto queryStatePair : pCB->queryToStateMap) {
            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
        }
    }
    bool skip_call = false;
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        if (queryToStateElement != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
                for (auto cmdBuffer : queryElement->second) {
                    auto pCB = getCBNode(dev_data, cmdBuffer);
                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                             (uint64_t)(queryPool), firstQuery + i);
                    } else {
                        for (auto event : queryEventElement->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
                // Unavailable and in flight
            } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmdBuffer : queryElement->second) {
                    auto pCB = getCBNode(dev_data, cmdBuffer);
                    make_available |= pCB->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                         (uint64_t)(queryPool), firstQuery + i);
                }
                // Unavailable
            } else if (!queryToStateElement->second) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
            // Uninitialized: no state has ever been recorded for this query
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
                                                                flags);
}

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_node = getBufferNode(my_data, buffer);
    if (!buffer_node) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_node->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
        }
    }
    return skip_call;
}

static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
                                     VkDebugReportObjectTypeEXT object_type) {
    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer 0x%" PRIx64 " is aliased with image 0x%" PRIx64, object_handle,
                       other_handle);
    } else {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Image 0x%" PRIx64 " is aliased with buffer 0x%" PRIx64, object_handle,
                       other_handle);
    }
}

static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
                                  VkDebugReportObjectTypeEXT object_type) {
    bool skip_call = false;

    for (auto range : ranges) {
        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
            continue;
        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
            continue;
        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
    }
    return skip_call;
}
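
// Hedged numeric example of the overlap test above, assuming a hypothetical
// bufferImageGranularity of 0x400 (1 KiB); masking with ~(0x400 - 1) rounds each
// endpoint down to its granularity page:
//
//   existing image range:  start 0x0000, end 0x041F  -> masked end   0x0400
//   new buffer range:      start 0x0480, end 0x05FF  -> masked start 0x0400
//
// The raw byte ranges do not overlap, but both land on the 0x0400 page, so the
// buffer is reported as aliasing the image, which is exactly what the
// granularity rule requires.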

static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                         VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) {
    MEMORY_RANGE range;
    range.handle = handle;
    range.memory = mem;
    range.start = memoryOffset;
    range.end = memoryOffset + memRequirements.size - 1;
    ranges.push_back(range);
    return range;
}

static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) {
    for (size_t item = 0; item < ranges.size(); item++) {
        if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) {
            ranges.erase(ranges.begin() + item);
            break;
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    if (!validateIdleBuffer(dev_data, buffer)) {
        lock.unlock();
        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
    }
    // Clean up memory binding and range information for the buffer
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it != dev_data->bufferMap.end()) {
        auto mem_info = getMemObjInfo(dev_data, buff_it->second.get()->mem);
        if (mem_info) {
            remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), buff_it->second.get()->mem, mem_info->bufferRanges);
        }
        clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
        dev_data->bufferMap.erase(buff_it);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    auto item = dev_data->bufferViewMap.find(bufferView);
    if (item != dev_data->bufferViewMap.end()) {
        dev_data->bufferViewMap.erase(item);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    const auto &imageEntry = dev_data->imageMap.find(image);
    if (imageEntry != dev_data->imageMap.end()) {
        // Clean up memory mapping, bindings and range references for the image
        auto mem_info = getMemObjInfo(dev_data, imageEntry->second.get()->mem);
        if (mem_info) {
            remove_memory_ranges(reinterpret_cast<uint64_t &>(image), imageEntry->second.get()->mem, mem_info->imageRanges);
            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
            mem_info->image = VK_NULL_HANDLE;
        }
        // Remove image from imageMap
        dev_data->imageMap.erase(imageEntry);
    }
    const auto &subEntry = dev_data->imageSubresourceMap.find(image);
    if (subEntry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : subEntry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(subEntry);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName) {
    bool skip_call = false;
    if (((1 << mem_info->allocInfo.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
                            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                            "type index (%u) of this memory object 0x%" PRIx64 ".",
                            funcName, memory_type_bits, mem_info->allocInfo.memoryTypeIndex,
                            reinterpret_cast<const uint64_t &>(mem_info->mem));
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = (uint64_t)(buffer);
    bool skipCall =
        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        buffer_node->mem = mem;
        VkMemoryRequirements memRequirements;
        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            const MEMORY_RANGE range =
                insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, mem_info->bufferRanges);
            skipCall |= validate_memory_range(dev_data, mem_info->imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
            skipCall |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                        ", returned from a call to vkGetBufferMemoryRequirements with this buffer.",
                        memoryOffset, memRequirements.alignment);
        }
        // Validate device limit alignments
        VkBufferUsageFlags usage = buffer_node->createInfo.usage;
        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minTexelBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
            }
        }
        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
                0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
            }
        }
        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
                0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
            }
        }
    }
    print_mem_list(dev_data);
    lock.unlock();
    if (!skipCall) {
        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}
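
// Hedged usage sketch for the alignment rules validated above (`desiredOffset`
// and the other handles are hypothetical):
//
//   VkMemoryRequirements reqs;
//   vkGetBufferMemoryRequirements(device, buffer, &reqs);
//   // reqs.alignment is guaranteed by the spec to be a power of two, so
//   // rounding up with a mask yields a valid memoryOffset:
//   VkDeviceSize offset = (desiredOffset + reqs.alignment - 1) & ~(reqs.alignment - 1);
//   vkBindBufferMemory(device, buffer, mem, offset);
//
// Texel/uniform/storage usage bits may additionally require the stricter
// device-limit alignments checked above.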

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}
// Verify that the cmdBuffer in the given cb_node is not in the global in-flight set, and return the skip_call result.
//  If this is a secondary command buffer, then make sure its primary is also in-flight.
//  If the primary is not in-flight, then remove the secondary from the global in-flight set.
// This function is only valid at a point when cmdBuffer is being reset or freed.
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // A primary CB, or a secondary CB whose primary is also in-flight, is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
        }
    }
    return skip_call;
}
5345
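// A minimal usage sketch of the error caught above (hypothetical application
// code; 'queue', 'cmd_buf', 'fence' and 'submit_info' are assumed to exist):
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     vkResetCommandBuffer(cmd_buf, 0);  // cmd_buf still in flight -> error logged above
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkResetCommandBuffer(cmd_buf, 0);  // OK: submission has retired
//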
5346// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5347static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
5348    bool skip_call = false;
5349    for (auto cmd_buffer : pPool->commandBuffers) {
5350        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5351            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5352        }
5353    }
5354    return skip_call;
5355}
5356
5357static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
5358    for (auto cmd_buffer : pPool->commandBuffers) {
5359        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5360    }
5361}
5362
5363VKAPI_ATTR void VKAPI_CALL
5364FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5365    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5366    bool skip_call = false;
5367    std::unique_lock<std::mutex> lock(global_lock);
5368
5369    for (uint32_t i = 0; i < commandBufferCount; i++) {
5370        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
5371        // Delete CB information structure, and remove from commandBufferMap
5372        if (cb_pair != dev_data->commandBufferMap.end()) {
5373            skip_call |= checkCommandBufferInFlight(dev_data, cb_pair->second, "free");
5374        }
5375    }
5376    lock.unlock();
5377
5378    if (skip_call)
5379        return;
5380
5381    dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5382
5383    lock.lock();
5384    auto pPool = getCommandPoolNode(dev_data, commandPool);
5385    for (uint32_t i = 0; i < commandBufferCount; i++) {
5386        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
5387        // Delete CB information structure, and remove from commandBufferMap
5388        if (cb_pair != dev_data->commandBufferMap.end()) {
5389            dev_data->globalInFlightCmdBuffers.erase(cb_pair->first);
5390            // reset prior to delete for data clean-up
5391            resetCB(dev_data, cb_pair->second->commandBuffer);
5392            delete cb_pair->second;
5393            dev_data->commandBufferMap.erase(cb_pair);
5394        }
5395
5396        // Remove commandBuffer reference from commandPoolMap
5397        pPool->commandBuffers.remove(pCommandBuffers[i]);
5398    }
5399    printCBList(dev_data);
5400    lock.unlock();
5401}
5402
5403VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5404                                                 const VkAllocationCallbacks *pAllocator,
5405                                                 VkCommandPool *pCommandPool) {
5406    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5407
5408    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5409
5410    if (VK_SUCCESS == result) {
5411        std::lock_guard<std::mutex> lock(global_lock);
5412        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5413        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5414    }
5415    return result;
5416}
5417
5418VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5419                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5420
5421    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5422    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5423    if (result == VK_SUCCESS) {
5424        std::lock_guard<std::mutex> lock(global_lock);
5425        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
5426    }
5427    return result;
5428}
5429
5430// Destroy commandPool along with all of the commandBuffers allocated from that pool
5431VKAPI_ATTR void VKAPI_CALL
5432DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5433    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5434    bool skipCall = false;
5435    std::unique_lock<std::mutex> lock(global_lock);
5436    // Verify that command buffers in pool are complete (not in-flight)
5437    auto pPool = getCommandPoolNode(dev_data, commandPool);
5438    skipCall |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");
5439
5440    lock.unlock();
5441
5442    if (skipCall)
5443        return;
5444
5445    dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
5446
5447    lock.lock();
5448    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
5449    clearCommandBuffersInFlight(dev_data, pPool);
5450    for (auto cb : pPool->commandBuffers) {
5451        clear_cmd_buf_and_mem_references(dev_data, cb);
5452        auto del_cb = dev_data->commandBufferMap.find(cb);
5453        delete del_cb->second;                  // delete CB info structure
5454        dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
5455    }
5456    dev_data->commandPoolMap.erase(commandPool);
5457    lock.unlock();
5458}
5459
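// Sketch of the pattern this validates (hypothetical handles): a pool may
// only be destroyed once none of its command buffers are pending execution:
//
//     vkQueueWaitIdle(queue);                      // retire work recorded from 'pool'
//     vkDestroyCommandPool(device, pool, nullptr); // passes checkCommandBuffersInFlight()
//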
5460VKAPI_ATTR VkResult VKAPI_CALL
5461ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5462    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5463    bool skipCall = false;
5464
5465    std::unique_lock<std::mutex> lock(global_lock);
5466    auto pPool = getCommandPoolNode(dev_data, commandPool);
5467    skipCall |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
5468    lock.unlock();
5469
5470    if (skipCall)
5471        return VK_ERROR_VALIDATION_FAILED_EXT;
5472
5473    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
5474
5475    // Reset all of the CBs allocated from this pool
5476    if (VK_SUCCESS == result) {
5477        lock.lock();
5478        clearCommandBuffersInFlight(dev_data, pPool);
5479        for (auto cmdBuffer : pPool->commandBuffers) {
5480            resetCB(dev_data, cmdBuffer);
5481        }
5482        lock.unlock();
5483    }
5484    return result;
5485}
5486
5487VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5488    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5489    bool skipCall = false;
5490    std::unique_lock<std::mutex> lock(global_lock);
5491    for (uint32_t i = 0; i < fenceCount; ++i) {
5492        auto pFence = getFenceNode(dev_data, pFences[i]);
5493        if (pFence && pFence->state == FENCE_INFLIGHT) {
5494            skipCall |=
5495                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5496                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5497                            "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
5498        }
5499    }
5500    lock.unlock();
5501
5502    if (skipCall)
5503        return VK_ERROR_VALIDATION_FAILED_EXT;
5504
5505    VkResult result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
5506
5507    if (result == VK_SUCCESS) {
5508        lock.lock();
5509        for (uint32_t i = 0; i < fenceCount; ++i) {
5510            auto pFence = getFenceNode(dev_data, pFences[i]);
5511            if (pFence) {
5512                pFence->state = FENCE_UNSIGNALED;
5513                // TODO: these should really have already been enforced on
5514                // INFLIGHT->RETIRED transition.
5515                pFence->queues.clear();
5516                pFence->priorFences.clear();
5517            }
5518        }
5519        lock.unlock();
5520    }
5521
5522    return result;
5523}
5524
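// Sketch of the FENCE_INFLIGHT check above (hypothetical handles): a fence
// may only be reset after the submission using it has retired:
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     vkResetFences(device, 1, &fence);  // error: "Fence ... is in use."
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &fence);  // OK: state returns to FENCE_UNSIGNALED
//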
5525VKAPI_ATTR void VKAPI_CALL
5526DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5527    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5528    std::unique_lock<std::mutex> lock(global_lock);
5529    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
5530    if (fbNode != dev_data->frameBufferMap.end()) {
5531        for (auto cb : fbNode->second->referencingCmdBuffers) {
5532            auto cbNode = dev_data->commandBufferMap.find(cb);
5533            if (cbNode != dev_data->commandBufferMap.end()) {
5534                // Set CB as invalid and record destroyed framebuffer
5535                cbNode->second->state = CB_INVALID;
5536                cbNode->second->destroyedFramebuffers.insert(framebuffer);
5537            }
5538        }
5539        dev_data->frameBufferMap.erase(fbNode);
5540    }
5541    lock.unlock();
5542    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
5543}
5544
5545VKAPI_ATTR void VKAPI_CALL
5546DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
5547    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5548    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
5549    std::lock_guard<std::mutex> lock(global_lock);
5550    dev_data->renderPassMap.erase(renderPass);
5551    // TODO: leaking all the guts of the renderpass node here!
5552}
5553
5554VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
5555                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
5556    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5557
5558    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
5559
5560    if (VK_SUCCESS == result) {
5561        std::lock_guard<std::mutex> lock(global_lock);
5562        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so fix that if/when we want that data to be valid
5563        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(pCreateInfo))));
5564    }
5565    return result;
5566}
5567
5568VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
5569                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
5570    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5571    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
5572    if (VK_SUCCESS == result) {
5573        std::lock_guard<std::mutex> lock(global_lock);
5574        dev_data->bufferViewMap[*pView] = unique_ptr<VkBufferViewCreateInfo>(new VkBufferViewCreateInfo(*pCreateInfo));
5575        // In order to create a valid buffer view, the buffer must have been created with at least one of the
5576        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
5577        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
5578                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
5579                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
5580    }
5581    return result;
5582}
5583
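// Sketch of a buffer that satisfies the usage check above (hypothetical
// values; error handling omitted):
//
//     VkBufferCreateInfo buf_ci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
//     buf_ci.size = 4096;
//     buf_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // texel-buffer bit required
//     buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     vkCreateBuffer(device, &buf_ci, nullptr, &buffer);
//     // A subsequent vkCreateBufferView() on 'buffer' passes validate_buffer_usage_flags().
//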
5584VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
5585                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
5586    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5587
5588    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
5589
5590    if (VK_SUCCESS == result) {
5591        std::lock_guard<std::mutex> lock(global_lock);
5592        IMAGE_LAYOUT_NODE image_node;
5593        image_node.layout = pCreateInfo->initialLayout;
5594        image_node.format = pCreateInfo->format;
5595        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pCreateInfo))));
5596        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
5597        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
5598        dev_data->imageLayoutMap[subpair] = image_node;
5599    }
5600    return result;
5601}
5602
5603static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
5604    /* expects global_lock to be held by caller */
5605
5606    auto image_node = getImageNode(dev_data, image);
5607    if (image_node) {
5608        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
5609         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
5610         * the actual values.
5611         */
5612        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
5613            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
5614        }
5615
5616        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
5617            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
5618        }
5619    }
5620}
5621
5622// Return the correct layer/level counts if the caller used the special
5623// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
5624static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
5625                                         VkImage image) {
5626    /* expects global_lock to be held by caller */
5627
5628    *levels = range.levelCount;
5629    *layers = range.layerCount;
5630    auto image_node = getImageNode(dev_data, image);
5631    if (image_node) {
5632        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
5633            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
5634        }
5635        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
5636            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
5637        }
5638    }
5639}
5640
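// Worked example of the resolution above: for an image created with
// mipLevels = 10 and arrayLayers = 6, the range {baseMipLevel = 3,
// levelCount = VK_REMAINING_MIP_LEVELS, baseArrayLayer = 2,
// layerCount = VK_REMAINING_ARRAY_LAYERS} resolves to
// levelCount = 10 - 3 = 7 and layerCount = 6 - 2 = 4.
//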
5641VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
5642                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
5643    bool skipCall = false;
5644    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5645    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5646    {
5647        // Validate that img has correct usage flags set
5648        std::lock_guard<std::mutex> lock(global_lock);
5649        skipCall |= validate_image_usage_flags(dev_data, pCreateInfo->image,
5650                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
5651                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
5652                false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
5653    }
5654
5655    if (!skipCall) {
5656        result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
5657    }
5658
5659    if (VK_SUCCESS == result) {
5660        std::lock_guard<std::mutex> lock(global_lock);
5661        dev_data->imageViewMap[*pView] = unique_ptr<VkImageViewCreateInfo>(new VkImageViewCreateInfo(*pCreateInfo));
5662        ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[*pView].get()->subresourceRange, pCreateInfo->image);
5663    }
5664
5665    return result;
5666}
5667
5668VKAPI_ATTR VkResult VKAPI_CALL
5669CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
5670    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5671    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
5672    if (VK_SUCCESS == result) {
5673        std::lock_guard<std::mutex> lock(global_lock);
5674        auto &fence_node = dev_data->fenceMap[*pFence];
5675        fence_node.fence = *pFence;
5676        fence_node.createInfo = *pCreateInfo;
5677        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
5678    }
5679    return result;
5680}
5681
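// Sketch of the signaled-at-creation path above (hypothetical handles): a
// pre-signaled fence is tracked as FENCE_RETIRED, so waiting on it before any
// submission returns immediately:
//
//     VkFenceCreateInfo fence_ci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO};
//     fence_ci.flags = VK_FENCE_CREATE_SIGNALED_BIT;  // -> FENCE_RETIRED
//     vkCreateFence(device, &fence_ci, nullptr, &fence);
//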
5682// TODO handle pipeline caches
5683VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
5684                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
5685    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5686    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
5687    return result;
5688}
5689
5690VKAPI_ATTR void VKAPI_CALL
5691DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
5692    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5693    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
5694}
5695
5696VKAPI_ATTR VkResult VKAPI_CALL
5697GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
5698    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5699    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
5700    return result;
5701}
5702
5703VKAPI_ATTR VkResult VKAPI_CALL
5704MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
5705    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5706    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
5707    return result;
5708}
5709
5710// utility function to set collective state for pipeline
5711void set_pipeline_state(PIPELINE_NODE *pPipe) {
5712    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
5713    if (pPipe->graphicsPipelineCI.pColorBlendState) {
5714        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
5715            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
5716                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5717                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5718                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5719                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5720                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5721                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5722                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5723                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
5724                    pPipe->blendConstantsEnabled = true;
5725                }
5726            }
5727        }
5728    }
5729}
5730
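// Sketch of an attachment state that sets blendConstantsEnabled above
// (hypothetical values): any blend factor in the
// CONSTANT_COLOR..ONE_MINUS_CONSTANT_ALPHA range means draws with this
// pipeline need valid blend constants bound:
//
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;           // in tested range
//     att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR; // in tested range
//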
5731VKAPI_ATTR VkResult VKAPI_CALL
5732CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5733                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5734                        VkPipeline *pPipelines) {
5735    VkResult result = VK_SUCCESS;
5736    // TODO What to do with pipelineCache?
5737    // The order of operations here is a little convoluted but gets the job done
5738    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
5739    //  2. Create state is then validated (which uses flags setup during shadowing)
5740    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
5741    bool skipCall = false;
5742    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5743    vector<PIPELINE_NODE *> pPipeNode(count);
5744    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5745
5746    uint32_t i = 0;
5747    std::unique_lock<std::mutex> lock(global_lock);
5748
5749    for (i = 0; i < count; i++) {
5750        pPipeNode[i] = new PIPELINE_NODE;
5751        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
5752        pPipeNode[i]->renderPass = getRenderPass(dev_data, pCreateInfos[i].renderPass);
5753        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5754
5755        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
5756    }
5757
5758    if (!skipCall) {
5759        lock.unlock();
5760        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5761                                                                          pPipelines);
5762        lock.lock();
5763        for (i = 0; i < count; i++) {
5764            pPipeNode[i]->pipeline = pPipelines[i];
5765            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5766        }
5767        lock.unlock();
5768    } else {
5769        for (i = 0; i < count; i++) {
5770            delete pPipeNode[i];
5771        }
5772        lock.unlock();
5773        return VK_ERROR_VALIDATION_FAILED_EXT;
5774    }
5775    return result;
5776}
5777
5778VKAPI_ATTR VkResult VKAPI_CALL
5779CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5780                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5781                       VkPipeline *pPipelines) {
5782    VkResult result = VK_SUCCESS;
5783    bool skipCall = false;
5784
5785    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5786    vector<PIPELINE_NODE *> pPipeNode(count);
5787    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5788
5789    uint32_t i = 0;
5790    std::unique_lock<std::mutex> lock(global_lock);
5791    for (i = 0; i < count; i++) {
5792        // TODO: Verify compute stage bits
5793
5794        // Create and initialize internal tracking data structure
5795        pPipeNode[i] = new PIPELINE_NODE;
5796        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
5797        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5798        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
5799
5800        // TODO: Add Compute Pipeline Verification
5801        skipCall |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i],
5802                                               &dev_data->phys_dev_properties.features,
5803                                               dev_data->shaderModuleMap);
5804        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
5805    }
5806
5807    if (!skipCall) {
5808        lock.unlock();
5809        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5810                                                                         pPipelines);
5811        lock.lock();
5812        for (i = 0; i < count; i++) {
5813            pPipeNode[i]->pipeline = pPipelines[i];
5814            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5815        }
5816        lock.unlock();
5817    } else {
5818        for (i = 0; i < count; i++) {
5819            // Clean up any locally allocated data structures
5820            delete pPipeNode[i];
5821        }
5822        lock.unlock();
5823        return VK_ERROR_VALIDATION_FAILED_EXT;
5824    }
5825    return result;
5826}
5827
5828VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
5829                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
5830    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5831    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
5832    if (VK_SUCCESS == result) {
5833        std::lock_guard<std::mutex> lock(global_lock);
5834        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
5835    }
5836    return result;
5837}
5838
5839VKAPI_ATTR VkResult VKAPI_CALL
5840CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
5841                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
5842    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5843    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
5844    if (VK_SUCCESS == result) {
5845        // TODOSC : Capture layout bindings set
5846        std::lock_guard<std::mutex> lock(global_lock);
5847        dev_data->descriptorSetLayoutMap[*pSetLayout] =
5848            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
5849    }
5850    return result;
5851}
5852
5853// Used by CreatePipelineLayout and CmdPushConstants.
5854// Note that the index argument is optional and only used by CreatePipelineLayout.
5855static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
5856                                      const char *caller_name, uint32_t index = 0) {
5857    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
5858    bool skipCall = false;
5859    // Check that offset + size don't exceed the max.
5860    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
5861    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
5862        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
5863        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5864            skipCall |=
5865                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5866                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
5867                                                              "exceeds this device's maxPushConstantsSize of %u.",
5868                        caller_name, index, offset, size, maxPushConstantsSize);
5869        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5870            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5871                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
5872                                                                      "exceeds this device's maxPushConstantsSize of %u.",
5873                                caller_name, offset, size, maxPushConstantsSize);
5874        } else {
5875            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5876                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5877        }
5878    }
5879    // size needs to be non-zero and a multiple of 4.
5880    if ((size == 0) || ((size & 0x3) != 0)) {
5881        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5882            skipCall |=
5883                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5884                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5885                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5886                        caller_name, index, size);
5887        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5888            skipCall |=
5889                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5890                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5891                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5892                        caller_name, size);
5893        } else {
5894            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5895                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5896        }
5897    }
5898    // offset needs to be a multiple of 4.
5899    if ((offset & 0x3) != 0) {
5900        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5901            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5902                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5903                                                                      "offset %u. Offset must be a multiple of 4.",
5904                                caller_name, index, offset);
5905        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5906            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5907                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5908                                                                      "offset %u. Offset must be a multiple of 4.",
5909                                caller_name, offset);
5910        } else {
5911            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5912                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5913        }
5914    }
5915    return skipCall;
5916}
5917
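// Example of a range that trips all three checks above, assuming a device
// limit maxPushConstantsSize of 128 (hypothetical values):
//
//     VkPushConstantRange bad = {};
//     bad.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
//     bad.offset = 126;  // not a multiple of 4, and 126 + 6 > 128
//     bad.size = 6;      // non-zero but not a multiple of 4
//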
5918VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5919                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5920    bool skipCall = false;
5921    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5922    // Push Constant Range checks
5923    uint32_t i = 0;
5924    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5925        skipCall |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5926                                              pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5927        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5928            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5929                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
5930        }
5931    }
5932    // Each range has been validated.  Now check for overlap between ranges (if they are good).
5933    if (!skipCall) {
5934        uint32_t i, j;
5935        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5936            for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5937                const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
5938                const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
5939                const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
5940                const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
5941                if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
5942                    skipCall |=
5943                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5944                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
5945                                                                      "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
5946                                i, minA, maxA, j, minB, maxB);
5947                }
5948            }
5949        }
5950    }
5951
5952    if (skipCall)
5953        return VK_ERROR_VALIDATION_FAILED_EXT;
5954
5955    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5956    if (VK_SUCCESS == result) {
5957        std::lock_guard<std::mutex> lock(global_lock);
5958        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
5959        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
5960        plNode.setLayouts.resize(pCreateInfo->setLayoutCount);
5961        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5962            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
5963            plNode.setLayouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5964        }
5965        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
5966        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5967            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
5968        }
5969    }
5970    return result;
5971}
5972
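// Sketch of the overlap warning above (hypothetical values): ranges [0, 16)
// and [8, 24) overlap on [8, 16) and would be reported as
// "overlapping ranges: 0:[0, 16), 1:[8, 24)":
//
//     VkPushConstantRange ranges[2] = {
//         {VK_SHADER_STAGE_VERTEX_BIT, 0, 16},   // [0, 16)
//         {VK_SHADER_STAGE_FRAGMENT_BIT, 8, 16}, // [8, 24)
//     };
//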
5973VKAPI_ATTR VkResult VKAPI_CALL
5974CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
5975                     VkDescriptorPool *pDescriptorPool) {
5976    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5977    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
5978    if (VK_SUCCESS == result) {
5979        // Log pool creation and add the new node to descriptorPoolMap
5980        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5981                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
5982                    (uint64_t)*pDescriptorPool))
5983            return VK_ERROR_VALIDATION_FAILED_EXT;
5984        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
5985        if (NULL == pNewNode) {
5986            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5987                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
5988                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
5989                return VK_ERROR_VALIDATION_FAILED_EXT;
5990        } else {
5991            std::lock_guard<std::mutex> lock(global_lock);
5992            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
5993        }
5994    } else {
5995        // TODO : Does anything need to be cleaned up if pool creation fails?
5996    }
5997    return result;
5998}
5999
6000VKAPI_ATTR VkResult VKAPI_CALL
6001ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6002    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6003    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6004    if (VK_SUCCESS == result) {
6005        std::lock_guard<std::mutex> lock(global_lock);
6006        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6007    }
6008    return result;
6009}
6010// Ensure the pool contains enough descriptors and descriptor sets to satisfy
6011// an allocation request. Fills common_data with the total number of descriptors of each type required,
6012// as well as DescriptorSetLayout ptrs used for later update.
6013static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6014                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6015    // All state checks for AllocateDescriptorSets are done in a single function
6016    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
6017}
6018// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
6019static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6020                                                 VkDescriptorSet *pDescriptorSets,
6021                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6022    // All the updates are contained in a single cvdescriptorset function
6023    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
6024                                                   &dev_data->setMap, dev_data);
6025}
6026
6027VKAPI_ATTR VkResult VKAPI_CALL
6028AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6029    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6030    std::unique_lock<std::mutex> lock(global_lock);
6031    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6032    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6033    lock.unlock();
6034
6035    if (skip_call)
6036        return VK_ERROR_VALIDATION_FAILED_EXT;
6037
6038    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6039
6040    if (VK_SUCCESS == result) {
6041        lock.lock();
6042        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6043        lock.unlock();
6044    }
6045    return result;
6046}
6047// Verify state before freeing DescriptorSets
6048static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6049                                              const VkDescriptorSet *descriptor_sets) {
6050    bool skip_call = false;
6051    // First make sure sets being destroyed are not currently in-use
6052    for (uint32_t i = 0; i < count; ++i)
6053        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6054
6055    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
6056    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
6057        // Can't Free from a NON_FREE pool
6058        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6059                             reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6060                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6061                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6062    }
6063    return skip_call;
6064}
6065// Sets have been removed from the pool so update underlying state
6066static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6067                                             const VkDescriptorSet *descriptor_sets) {
6068    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
6069    // Update available descriptor sets in pool
6070    pool_state->availableSets += count;
6071
6072    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6073    for (uint32_t i = 0; i < count; ++i) {
6074        auto set_state = dev_data->setMap[descriptor_sets[i]];
6075        uint32_t type_index = 0, descriptor_count = 0;
6076        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6077            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6078            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6079            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6080        }
6081        freeDescriptorSet(dev_data, set_state);
6082        pool_state->sets.erase(set_state);
6083    }
6084}
6085
6086VKAPI_ATTR VkResult VKAPI_CALL
6087FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6088    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6089    // Make sure that no sets being destroyed are in-flight
6090    std::unique_lock<std::mutex> lock(global_lock);
6091    bool skipCall = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6092    lock.unlock();
6093    if (skipCall)
6094        return VK_ERROR_VALIDATION_FAILED_EXT;
6095    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6096    if (VK_SUCCESS == result) {
6097        lock.lock();
6098        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6099        lock.unlock();
6100    }
6101    return result;
6102}
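
// Sketch of the pool-flag requirement enforced above (hypothetical values;
// 'pool_size' is an assumed VkDescriptorPoolSize): freeing individual sets is
// only legal from pools created with the free bit:
//
//     VkDescriptorPoolCreateInfo pool_ci = {VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO};
//     pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // allows vkFreeDescriptorSets()
//     pool_ci.maxSets = 16;
//     pool_ci.poolSizeCount = 1;
//     pool_ci.pPoolSizes = &pool_size;
//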
6103// TODO : This is a Proof-of-concept for core validation architecture
6104//  Really we'll want to break out these functions to separate files but
6105//  keeping it all together here to prove out design
6106// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6107static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6108                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6109                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6110    // First thing to do is perform map look-ups.
6111    // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
6112    //  so we can't just do a single map look-up up-front, but do them individually in functions below
6113
6114    // Now make call(s) that validate state, but don't perform state updates in this function
6115    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
6116    //  namespace which will parse params and make calls into specific class instances
6117    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6118                                                         descriptorCopyCount, pDescriptorCopies);
6119}
6120// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6121static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6122                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6123                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6124    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6125                                                 pDescriptorCopies);
6126}
6127
6128VKAPI_ATTR void VKAPI_CALL
6129UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6130                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6131    // Only map look-up at top level is for device-level layer_data
6132    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6133    std::unique_lock<std::mutex> lock(global_lock);
6134    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6135                                                         pDescriptorCopies);
6136    lock.unlock();
6137    if (!skip_call) {
6138        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6139                                                              pDescriptorCopies);
6140        lock.lock();
6141        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6142        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6143                                           pDescriptorCopies);
6144    }
6145}
6146
6147VKAPI_ATTR VkResult VKAPI_CALL
6148AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6149    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6150    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6151    if (VK_SUCCESS == result) {
6152        std::unique_lock<std::mutex> lock(global_lock);
6153        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
6154
6155        if (pPool) {
6156            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6157                // Add command buffer to its commandPool map
6158                pPool->commandBuffers.push_back(pCommandBuffer[i]);
6159                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6160                // Add command buffer to map
6161                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6162                resetCB(dev_data, pCommandBuffer[i]);
6163                pCB->createInfo = *pCreateInfo;
6164                pCB->device = device;
6165            }
6166        }
6167        printCBList(dev_data);
6168        lock.unlock();
6169    }
6170    return result;
6171}
6172
6173VKAPI_ATTR VkResult VKAPI_CALL
6174BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6175    bool skipCall = false;
6176    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6177    std::unique_lock<std::mutex> lock(global_lock);
6178    // Validate command buffer level
6179    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6180    if (pCB) {
6181        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6182        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6183            skipCall |=
6184                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6185                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6186                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6187                        "You must check CB fence before this call.",
6188                        commandBuffer);
6189        }
6190        clear_cmd_buf_and_mem_references(dev_data, pCB);
6191        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6192            // Secondary Command Buffer
6193            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6194            if (!pInfo) {
6195                skipCall |=
6196                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6197                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6198                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6199                            reinterpret_cast<void *>(commandBuffer));
6200            } else {
6201                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6202                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6203                        skipCall |= log_msg(
6204                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6205                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6206                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
6207                            reinterpret_cast<void *>(commandBuffer));
6208                    }
6209                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6210                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6211                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6212                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6213                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
6214                                                  "valid framebuffer parameter is specified.",
6215                                            reinterpret_cast<void *>(commandBuffer));
6216                    } else {
6217                        string errorString = "";
6218                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
6219                        if (framebuffer) {
6220                            VkRenderPass fbRP = framebuffer->createInfo.renderPass;
6221                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6222                                // renderPass that framebuffer was created with must be compatible with local renderPass
6223                                skipCall |=
6224                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6225                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6226                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6227                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6228                                                  "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6229                                                  "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6230                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6231                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6232                            }
6233                            // Connect this framebuffer to this cmdBuffer
6234                            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
6235                        }
6236                    }
6237                }
6238                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6239                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6240                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6241                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6242                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6243                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6244                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6245                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
6246                                        "support precise occlusion queries.",
6247                                        reinterpret_cast<void *>(commandBuffer));
6248                }
6249            }
6250            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6251                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
6252                if (renderPass) {
6253                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
6254                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6255                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6256                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6257                                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
6258                                            "that is less than the number of subpasses (%d).",
6259                                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
6260                    }
6261                }
6262            }
6263        }
6264        if (CB_RECORDING == pCB->state) {
6265            skipCall |=
6266                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6267                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6268                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6269                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6270                        (uint64_t)commandBuffer);
6271        } else if (CB_RECORDED == pCB->state || (CB_INVALID == pCB->state && CMD_END == pCB->cmds.back().type)) {
6272            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6273            auto pPool = getCommandPoolNode(dev_data, cmdPool);
6274            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6275                skipCall |=
6276                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6277                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6278                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
6279                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
6280                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6281                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6282            }
6283            resetCB(dev_data, commandBuffer);
6284        }
6285        // Set updated state here in case implicit reset occurs above
6286        pCB->state = CB_RECORDING;
6287        pCB->beginInfo = *pBeginInfo;
6288        if (pCB->beginInfo.pInheritanceInfo) {
6289            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6290            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6291            // If this is a secondary command buffer that inherits render pass state, update the inherited items.
6292            if ((pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6293                (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6294                pCB->activeRenderPass = getRenderPass(dev_data, pCB->beginInfo.pInheritanceInfo->renderPass);
6295                pCB->activeSubpass = pCB->beginInfo.pInheritanceInfo->subpass;
6296                pCB->framebuffers.insert(pCB->beginInfo.pInheritanceInfo->framebuffer);
6297            }
6298        }
6299    } else {
6300        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6301                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6302                            "vkBeginCommandBuffer(): Unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
6303    }
6304    lock.unlock();
6305    if (skipCall) {
6306        return VK_ERROR_VALIDATION_FAILED_EXT;
6307    }
6308    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6309
6310    return result;
6311}
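
// Example (illustrative, application-side; renderPass, framebuffer, and secondaryCmdBuffer
// are hypothetical handles): a secondary command buffer that continues a render pass and
// records precise occlusion queries must satisfy the checks above -- a valid subpass index,
// and occlusionQueryEnable set (with device support) before using VK_QUERY_CONTROL_PRECISE_BIT.
/*
VkCommandBufferInheritanceInfo inherit = {};
inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
inherit.renderPass = renderPass;            // render pass this secondary CB executes within
inherit.subpass = 0;                        // must be < subpassCount of renderPass
inherit.framebuffer = framebuffer;          // may be VK_NULL_HANDLE if not yet known
inherit.occlusionQueryEnable = VK_TRUE;     // required for the PRECISE bit below
inherit.queryFlags = VK_QUERY_CONTROL_PRECISE_BIT;

VkCommandBufferBeginInfo begin = {};
begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
begin.pInheritanceInfo = &inherit;
vkBeginCommandBuffer(secondaryCmdBuffer, &begin);
*/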
6312
6313VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6314    bool skipCall = false;
6315    VkResult result = VK_SUCCESS;
6316    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6317    std::unique_lock<std::mutex> lock(global_lock);
6318    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6319    if (pCB) {
6320        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6321            // This needs spec clarification to update valid usage, see comments in PR:
6322            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6323            skipCall |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6324        }
6325        skipCall |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6326        for (auto query : pCB->activeQueries) {
6327            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6328                                DRAWSTATE_INVALID_QUERY, "DS",
6329                                "Ending command buffer with an in-progress query: queryPool 0x%" PRIx64 ", index %d",
6330                                (uint64_t)(query.pool), query.index);
6331        }
6332    }
6333    if (!skipCall) {
6334        lock.unlock();
6335        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6336        lock.lock();
6337        if (VK_SUCCESS == result) {
6338            pCB->state = CB_RECORDED;
6339            // Reset CB status flags
6340            pCB->status = 0;
6341            printCB(dev_data, commandBuffer);
6342        }
6343    } else {
6344        result = VK_ERROR_VALIDATION_FAILED_EXT;
6345    }
6346    lock.unlock();
6347    return result;
6348}
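
// Example (illustrative, application-side; cmdBuffer and queryPool are hypothetical):
// every vkCmdBeginQuery must be matched by a vkCmdEndQuery before vkEndCommandBuffer,
// otherwise the in-progress-query error above fires.
/*
vkCmdBeginQuery(cmdBuffer, queryPool, 0, 0);   // slot 0, no flags
// ... record the draws the query measures ...
vkCmdEndQuery(cmdBuffer, queryPool, 0);        // close the query before ending the CB
vkEndCommandBuffer(cmdBuffer);
*/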
6349
6350VKAPI_ATTR VkResult VKAPI_CALL
6351ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6352    bool skip_call = false;
6353    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6354    std::unique_lock<std::mutex> lock(global_lock);
6355    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6356    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6357    auto pPool = getCommandPoolNode(dev_data, cmdPool);
6358    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6359        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6360                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6361                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6362                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6363                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6364    }
6365    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
6366    lock.unlock();
6367    if (skip_call)
6368        return VK_ERROR_VALIDATION_FAILED_EXT;
6369    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6370    if (VK_SUCCESS == result) {
6371        lock.lock();
6372        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
6373        resetCB(dev_data, commandBuffer);
6374        lock.unlock();
6375    }
6376    return result;
6377}
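
// Example (illustrative, application-side; device and graphicsQueueFamilyIndex are
// hypothetical): both explicit vkResetCommandBuffer and the implicit reset performed by
// vkBeginCommandBuffer on a RECORDED buffer require the pool to allow per-buffer resets.
/*
VkCommandPoolCreateInfo pool_info = {};
pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
pool_info.queueFamilyIndex = graphicsQueueFamilyIndex;
VkCommandPool cmdPool;
vkCreateCommandPool(device, &pool_info, NULL, &cmdPool);
*/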
6378
6379VKAPI_ATTR void VKAPI_CALL
6380CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6381    bool skipCall = false;
6382    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6383    std::unique_lock<std::mutex> lock(global_lock);
6384    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6385    if (pCB) {
6386        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6387        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6388            skipCall |=
6389                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6390                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6391                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
6392                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
6393        }
6394
6395        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6396        if (pPN) {
6397            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6398            set_cb_pso_status(pCB, pPN);
6399            set_pipeline_state(pPN);
6400        } else {
6401            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6402                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6403                                "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6404        }
6405    }
6406    lock.unlock();
6407    if (!skipCall)
6408        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6409}
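
// Example (illustrative, application-side; cmdBuffer and computePipeline are hypothetical):
// compute pipelines must be bound outside a render pass instance, so end any active pass first.
/*
vkCmdEndRenderPass(cmdBuffer);
vkCmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);
vkCmdDispatch(cmdBuffer, 64, 64, 1);
*/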
6410
6411VKAPI_ATTR void VKAPI_CALL
6412CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6413    bool skipCall = false;
6414    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6415    std::unique_lock<std::mutex> lock(global_lock);
6416    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6417    if (pCB) {
6418        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6419        pCB->status |= CBSTATUS_VIEWPORT_SET;
6420        pCB->viewports.resize(viewportCount);
6421        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
6422    }
6423    lock.unlock();
6424    if (!skipCall)
6425        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6426}
6427
6428VKAPI_ATTR void VKAPI_CALL
6429CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6430    bool skipCall = false;
6431    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6432    std::unique_lock<std::mutex> lock(global_lock);
6433    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6434    if (pCB) {
6435        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6436        pCB->status |= CBSTATUS_SCISSOR_SET;
6437        pCB->scissors.resize(scissorCount);
6438        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
6439    }
6440    lock.unlock();
6441    if (!skipCall)
6442        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6443}
6444
6445VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6446    bool skip_call = false;
6447    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6448    std::unique_lock<std::mutex> lock(global_lock);
6449    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6450    if (pCB) {
6451        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6452        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6453
6454        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6455        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6456            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6457                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6458                                 "vkCmdSetLineWidth() called, but the bound pipeline was not created with the "
6459                                 "VK_DYNAMIC_STATE_LINE_WIDTH dynamic state, so the new line width may be ignored.");
6460        } else {
6461            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6462        }
6463    }
6464    lock.unlock();
6465    if (!skip_call)
6466        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6467}
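
// Example (illustrative, application-side): vkCmdSetLineWidth only takes effect when the
// bound graphics pipeline declared VK_DYNAMIC_STATE_LINE_WIDTH; a sketch of the relevant
// fragment of pipeline creation.
/*
VkDynamicState dynamics[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
                             VK_DYNAMIC_STATE_LINE_WIDTH};
VkPipelineDynamicStateCreateInfo dyn_info = {};
dyn_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_info.dynamicStateCount = 3;
dyn_info.pDynamicStates = dynamics;
// ... point VkGraphicsPipelineCreateInfo::pDynamicState at &dyn_info ...
*/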
6468
6469VKAPI_ATTR void VKAPI_CALL
6470CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6471    bool skipCall = false;
6472    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6473    std::unique_lock<std::mutex> lock(global_lock);
6474    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6475    if (pCB) {
6476        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6477        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6478    }
6479    lock.unlock();
6480    if (!skipCall)
6481        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6482                                                         depthBiasSlopeFactor);
6483}
6484
6485VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6486    bool skipCall = false;
6487    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6488    std::unique_lock<std::mutex> lock(global_lock);
6489    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6490    if (pCB) {
6491        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6492        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6493    }
6494    lock.unlock();
6495    if (!skipCall)
6496        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6497}
6498
6499VKAPI_ATTR void VKAPI_CALL
6500CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6501    bool skipCall = false;
6502    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6503    std::unique_lock<std::mutex> lock(global_lock);
6504    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6505    if (pCB) {
6506        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6507        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6508    }
6509    lock.unlock();
6510    if (!skipCall)
6511        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6512}
6513
6514VKAPI_ATTR void VKAPI_CALL
6515CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6516    bool skipCall = false;
6517    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6518    std::unique_lock<std::mutex> lock(global_lock);
6519    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6520    if (pCB) {
6521        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6522        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6523    }
6524    lock.unlock();
6525    if (!skipCall)
6526        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6527}
6528
6529VKAPI_ATTR void VKAPI_CALL
6530CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6531    bool skipCall = false;
6532    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6533    std::unique_lock<std::mutex> lock(global_lock);
6534    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6535    if (pCB) {
6536        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6537        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6538    }
6539    lock.unlock();
6540    if (!skipCall)
6541        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6542}
6543
6544VKAPI_ATTR void VKAPI_CALL
6545CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6546    bool skipCall = false;
6547    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6548    std::unique_lock<std::mutex> lock(global_lock);
6549    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6550    if (pCB) {
6551        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6552        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6553    }
6554    lock.unlock();
6555    if (!skipCall)
6556        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6557}
6558
6559VKAPI_ATTR void VKAPI_CALL
6560CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6561                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6562                      const uint32_t *pDynamicOffsets) {
6563    bool skipCall = false;
6564    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6565    std::unique_lock<std::mutex> lock(global_lock);
6566    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6567    if (pCB) {
6568        if (pCB->state == CB_RECORDING) {
6569            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6570            uint32_t totalDynamicDescriptors = 0;
6571            string errorString = "";
6572            uint32_t lastSetIndex = firstSet + setCount - 1;
6573            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6574                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6575                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
6576            }
6577            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6578            for (uint32_t i = 0; i < setCount; i++) {
6579                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6580                if (pSet) {
6581                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
6582                    pSet->BindCommandBuffer(pCB);
6583                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
6584                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
6585                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6586                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6587                                        DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
6588                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6589                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
6590                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6591                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6592                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6593                                            "DS 0x%" PRIxLEAST64
6594                                            " bound but it was never updated. You may want to either update it or not bind it.",
6595                                            (uint64_t)pDescriptorSets[i]);
6596                    }
6597                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6598                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
6599                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6600                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6601                                            DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6602                                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
6603                                            "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
6604                                            i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
6605                    }
6606
6607                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
6608
6609                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
6610
6611                    if (setDynamicDescriptorCount) {
6612                        // First make sure we won't overstep bounds of pDynamicOffsets array
6613                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
6614                            skipCall |=
6615                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6616                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6617                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6618                                        "descriptorSet #%u (0x%" PRIxLEAST64
6619                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
6620                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
6621                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
6622                                        (dynamicOffsetCount - totalDynamicDescriptors));
6623                        } else { // Validate and store dynamic offsets with the set
6624                            // Validate Dynamic Offset Minimums
6625                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
6626                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
6627                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6628                                    if (vk_safe_modulo(
6629                                            pDynamicOffsets[cur_dyn_offset],
6630                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
6631                                        skipCall |= log_msg(
6632                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6633                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6634                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6635                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
6636                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
6637                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6638                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6639                                    }
6640                                    cur_dyn_offset++;
6641                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6642                                    if (vk_safe_modulo(
6643                                            pDynamicOffsets[cur_dyn_offset],
6644                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6645                                        skipCall |= log_msg(
6646                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6647                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6648                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6649                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
6650                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
6651                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6652                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6653                                    }
6654                                    cur_dyn_offset++;
6655                                }
6656                            }
6657
6658                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
6659                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
6660                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
6661                            // Keep running total of dynamic descriptor count to verify at the end
6662                            totalDynamicDescriptors += setDynamicDescriptorCount;
6663
6664                        }
6665                    }
6666                } else {
6667                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6668                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6669                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
6670                                        (uint64_t)pDescriptorSets[i]);
6671                }
6672                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6673                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6674                if (firstSet > 0) { // Check set #s below the first bound set
6675                    for (uint32_t i = 0; i < firstSet; ++i) {
6676                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
6677                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
6678                                                             layout, i, errorString)) {
6679                            skipCall |= log_msg(
6680                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6681                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6682                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
6683                                "DescriptorSet 0x%" PRIxLEAST64
6684                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6685                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
6686                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
6687                        }
6688                    }
6689                }
6690                // Check if the newly bound final set invalidates any remaining bound sets
6691                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
6692                    if (oldFinalBoundSet &&
6693                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, layout, lastSetIndex, errorString)) {
6694                        auto old_set = oldFinalBoundSet->GetSet();
6695                        skipCall |=
6696                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6697                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
6698                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
6699                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
6700                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
6701                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6702                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
6703                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
6704                                    lastSetIndex + 1, (uint64_t)layout);
6705                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6706                    }
6707                }
6708            }
6709            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6710            if (totalDynamicDescriptors != dynamicOffsetCount) {
6711                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6712                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6713                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6714                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
6715                                    "is %u. It should exactly match the number of dynamic descriptors.",
6716                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
6717            }
6718        } else {
6719            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
6720        }
6721    }
6722    lock.unlock();
6723    if (!skipCall)
6724        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6725                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6726}
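
// Example (illustrative, application-side; all handles hypothetical): when a bound set
// contains dynamic descriptors, pDynamicOffsets must supply exactly one offset per dynamic
// descriptor, each a multiple of the matching device alignment limit.
/*
uint32_t dynamic_offset = 256;   // multiple of minUniformBufferOffsetAlignment
vkCmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
                        0, 1, &descriptorSet,    // firstSet = 0, one set
                        1, &dynamic_offset);     // one UNIFORM_BUFFER_DYNAMIC descriptor
*/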
6727
6728VKAPI_ATTR void VKAPI_CALL
6729CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
6730    bool skipCall = false;
6731    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6732    // TODO : Somewhere need to verify that IBs have correct usage state flagged
6733    std::unique_lock<std::mutex> lock(global_lock);
6734    VkDeviceMemory mem;
6735    skipCall =
6736        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6737    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6738    if (cb_data != dev_data->commandBufferMap.end()) {
6739        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
6740        cb_data->second->validate_functions.push_back(function);
6741        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6742        VkDeviceSize offset_align = 0;
6743        switch (indexType) {
6744        case VK_INDEX_TYPE_UINT16:
6745            offset_align = 2;
6746            break;
6747        case VK_INDEX_TYPE_UINT32:
6748            offset_align = 4;
6749            break;
6750        default:
6751            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
6752            break;
6753        }
6754        if (!offset_align || (offset % offset_align)) {
6755            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6756                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
6757                                "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
6758                                offset, string_VkIndexType(indexType));
6759        }
6760        cb_data->second->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6761    }
6762    lock.unlock();
6763    if (!skipCall)
6764        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6765}
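
// Example (illustrative, application-side; cmdBuffer and indexBuffer hypothetical): the bind
// offset must be a multiple of the index size -- 2 bytes for UINT16, 4 bytes for UINT32.
/*
vkCmdBindIndexBuffer(cmdBuffer, indexBuffer, 0, VK_INDEX_TYPE_UINT32);  // OK: 0 % 4 == 0
vkCmdBindIndexBuffer(cmdBuffer, indexBuffer, 6, VK_INDEX_TYPE_UINT16);  // OK: 6 % 2 == 0
vkCmdBindIndexBuffer(cmdBuffer, indexBuffer, 6, VK_INDEX_TYPE_UINT32);  // error: 6 % 4 != 0
*/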
6766
6767void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6768    uint32_t end = firstBinding + bindingCount;
6769    if (pCB->currentDrawData.buffers.size() < end) {
6770        pCB->currentDrawData.buffers.resize(end);
6771    }
6772    for (uint32_t i = 0; i < bindingCount; ++i) {
6773        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6774    }
6775}
6776
6777static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
6778
6779VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
6780                                                uint32_t bindingCount, const VkBuffer *pBuffers,
6781                                                const VkDeviceSize *pOffsets) {
6782    bool skipCall = false;
6783    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6784    // TODO : Somewhere need to verify that VBs have correct usage state flagged
6785    std::unique_lock<std::mutex> lock(global_lock);
6786    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6787    if (cb_data != dev_data->commandBufferMap.end()) {
6788        for (uint32_t i = 0; i < bindingCount; ++i) {
6789            VkDeviceMemory mem;
6790            skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6791
6792            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
6793            cb_data->second->validate_functions.push_back(function);
6794        }
6795        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
6796        updateResourceTracking(cb_data->second, firstBinding, bindingCount, pBuffers);
6797    } else {
6798        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
6799    }
6800    lock.unlock();
6801    if (!skipCall)
6802        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6803}
6804
6805/* expects global_lock to be held by caller */
6806static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6807    bool skip_call = false;
6808
6809    for (auto imageView : pCB->updateImages) {
6810        auto iv_data = getImageViewData(dev_data, imageView);
6811        if (!iv_data)
6812            continue;
6813        VkImage image = iv_data->image;
6814        VkDeviceMemory mem;
6815        skip_call |=
6816            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
6817        std::function<bool()> function = [=]() {
6818            set_memory_valid(dev_data, mem, true, image);
6819            return false;
6820        };
6821        pCB->validate_functions.push_back(function);
6822    }
6823    for (auto buffer : pCB->updateBuffers) {
6824        VkDeviceMemory mem;
6825        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
6826                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6827        std::function<bool()> function = [=]() {
6828            set_memory_valid(dev_data, mem, true);
6829            return false;
6830        };
6831        pCB->validate_functions.push_back(function);
6832    }
6833    return skip_call;
6834}
6835
6836VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6837                                   uint32_t firstVertex, uint32_t firstInstance) {
6838    bool skipCall = false;
6839    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6840    std::unique_lock<std::mutex> lock(global_lock);
6841    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6842    if (pCB) {
6843        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
6844        pCB->drawCount[DRAW]++;
6845        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6846        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6847        // TODO : Need to pass commandBuffer as srcObj here
6848        skipCall |=
6849            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6850                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
6851        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6852        if (!skipCall) {
6853            updateResourceTrackingOnDraw(pCB);
6854        }
6855        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
6856    }
6857    lock.unlock();
6858    if (!skipCall)
6859        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6860}
6861
6862VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
6863                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
6864                                                            uint32_t firstInstance) {
6865    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6866    bool skipCall = false;
6867    std::unique_lock<std::mutex> lock(global_lock);
6868    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6869    if (pCB) {
6870        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
6871        pCB->drawCount[DRAW_INDEXED]++;
6872        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6873        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6874        // TODO : Need to pass commandBuffer as srcObj here
6875        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6876                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6877                            "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
6878        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6879        if (!skipCall) {
6880            updateResourceTrackingOnDraw(pCB);
6881        }
6882        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
6883    }
6884    lock.unlock();
6885    if (!skipCall)
6886        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
6887                                                        firstInstance);
6888}
6889
6890VKAPI_ATTR void VKAPI_CALL
6891CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6892    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6893    bool skipCall = false;
6894    std::unique_lock<std::mutex> lock(global_lock);
6895    VkDeviceMemory mem;
6896    // MTMTODO : merge with code below
6897    skipCall =
6898        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6899    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
6900    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6901    if (pCB) {
6902        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
6903        pCB->drawCount[DRAW_INDIRECT]++;
6904        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6905        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6906        // TODO : Need to pass commandBuffer as srcObj here
6907        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6908                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6909                            "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
6910        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6911        if (!skipCall) {
6912            updateResourceTrackingOnDraw(pCB);
6913        }
6914        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
6915    }
6916    lock.unlock();
6917    if (!skipCall)
6918        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6919}
6920
6921VKAPI_ATTR void VKAPI_CALL
6922CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6923    bool skipCall = false;
6924    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6925    std::unique_lock<std::mutex> lock(global_lock);
6926    VkDeviceMemory mem;
6927    // MTMTODO : merge with code below
6928    skipCall =
6929        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6930    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
6931    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6932    if (pCB) {
6933        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
6934        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
6935        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6936        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6937        // TODO : Need to pass commandBuffer as srcObj here
6938        skipCall |=
6939            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6940                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
6941                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
6942        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6943        if (!skipCall) {
6944            updateResourceTrackingOnDraw(pCB);
6945        }
6946        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
6947    }
6948    lock.unlock();
6949    if (!skipCall)
6950        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6951}
6952
6953VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6954    bool skipCall = false;
6955    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6956    std::unique_lock<std::mutex> lock(global_lock);
6957    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6958    if (pCB) {
6959        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6960        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6961        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
6962        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
6963    }
6964    lock.unlock();
6965    if (!skipCall)
6966        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
6967}
6968
6969VKAPI_ATTR void VKAPI_CALL
6970CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6971    bool skipCall = false;
6972    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6973    std::unique_lock<std::mutex> lock(global_lock);
6974    VkDeviceMemory mem;
6975    skipCall =
6976        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6977    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
6978    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6979    if (pCB) {
6980        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6981        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6982        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
6983        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
6984    }
6985    lock.unlock();
6986    if (!skipCall)
6987        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
6988}
6989
6990VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6991                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
6992    bool skipCall = false;
6993    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6994    std::unique_lock<std::mutex> lock(global_lock);
6995    VkDeviceMemory src_mem, dst_mem;
6996    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
6997    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBuffer");
6998    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
6999
7000    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBuffer");
7001    // Validate that SRC & DST buffers have correct usage flags set
7002    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7003                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7004    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7005                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7006    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7007    if (cb_data != dev_data->commandBufferMap.end()) {
7008        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBuffer()"); };
7009        cb_data->second->validate_functions.push_back(function);
7010        function = [=]() {
7011            set_memory_valid(dev_data, dst_mem, true);
7012            return false;
7013        };
7014        cb_data->second->validate_functions.push_back(function);
7015
7016        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7017        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBuffer");
7018    }
7019    lock.unlock();
7020    if (!skipCall)
7021        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7022}
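
// Example (illustrative, application-side; device and srcBuffer hypothetical): both buffers
// in a vkCmdCopyBuffer must carry the matching transfer usage bit from creation, or the
// usage-flag checks above fire.
/*
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.size = 4096;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer srcBuffer;
vkCreateBuffer(device, &buf_info, NULL, &srcBuffer);
// ... create dstBuffer the same way, with VK_BUFFER_USAGE_TRANSFER_DST_BIT ...
*/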
7023
7024static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7025                                    VkImageLayout srcImageLayout) {
7026    bool skip_call = false;
7027
7028    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7029    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7030    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7031        uint32_t layer = i + subLayers.baseArrayLayer;
7032        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7033        IMAGE_CMD_BUF_LAYOUT_NODE node;
7034        if (!FindLayout(pCB, srcImage, sub, node)) {
7035            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7036            continue;
7037        }
7038        if (node.layout != srcImageLayout) {
7039            // TODO: Improve log message in the next pass
7040            skip_call |=
7041                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7042                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7043                                                                        "when its current layout is %s.",
7044                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7045        }
7046    }
7047    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7048        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7049            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7050            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7051                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7052                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7053        } else {
7054            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7055                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7056                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7057                                 string_VkImageLayout(srcImageLayout));
7058        }
7059    }
7060    return skip_call;
7061}
7062
7063static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7064                                  VkImageLayout destImageLayout) {
7065    bool skip_call = false;
7066
7067    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7068    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7069    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7070        uint32_t layer = i + subLayers.baseArrayLayer;
7071        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7072        IMAGE_CMD_BUF_LAYOUT_NODE node;
7073        if (!FindLayout(pCB, destImage, sub, node)) {
7074            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7075            continue;
7076        }
7077        if (node.layout != destImageLayout) {
7078            skip_call |=
7079                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7080                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout is %s "
7081                                                                        "when its current layout is %s.",
7082                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7083        }
7084    }
7085    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7086        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7087            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7088            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7089                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7090                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7091        } else {
7092            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7093                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7094                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7095                                 string_VkImageLayout(destImageLayout));
7096        }
7097    }
7098    return skip_call;
7099}
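
// Example (illustrative, application-side; cmdBuffer and dstImage hypothetical): transition
// a destination image to TRANSFER_DST_OPTIMAL before copying into it, satisfying the layout
// checks above without the GENERAL-layout perf warning. Sketch for a single-mip,
// single-layer color image.
/*
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = dstImage;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
vkCmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                     VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &barrier);
*/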
7100
7101VKAPI_ATTR void VKAPI_CALL
7102CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7103             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7104    bool skipCall = false;
7105    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7106    std::unique_lock<std::mutex> lock(global_lock);
7107    VkDeviceMemory src_mem, dst_mem;
7108    // Validate that src & dst images have correct usage flags set
7109    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7110    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImage");
7111
7112    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7113    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImage");
7114    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7115                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7116    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7117                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7118    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7119    if (cb_data != dev_data->commandBufferMap.end()) {
7120        std::function<bool()> function = [=]() {
7121            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImage()", srcImage);
7122        };
7123        cb_data->second->validate_functions.push_back(function);
7124        function = [=]() {
7125            set_memory_valid(dev_data, dst_mem, true, dstImage);
7126            return false;
7127        };
7128        cb_data->second->validate_functions.push_back(function);
7129
7130        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGE, "vkCmdCopyImage()");
7131        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImage");
7132        for (uint32_t i = 0; i < regionCount; ++i) {
7133            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7134            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7135        }
7136    }
7137    lock.unlock();
7138    if (!skipCall)
7139        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7140                                                      regionCount, pRegions);
7141}
7142
7143VKAPI_ATTR void VKAPI_CALL
7144CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7145             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7146    bool skipCall = false;
7147    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7148    std::unique_lock<std::mutex> lock(global_lock);
7149    VkDeviceMemory src_mem, dst_mem;
7150    // Validate that src & dst images have correct usage flags set
7151    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7152    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdBlitImage");
7153
7154    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7155    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdBlitImage");
7156    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7157                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7158    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7159                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7160
7161    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7162    if (cb_data != dev_data->commandBufferMap.end()) {
7163        std::function<bool()> function = [=]() {
7164            return validate_memory_is_valid(dev_data, src_mem, "vkCmdBlitImage()", srcImage);
7165        };
7166        cb_data->second->validate_functions.push_back(function);
7167        function = [=]() {
7168            set_memory_valid(dev_data, dst_mem, true, dstImage);
7169            return false;
7170        };
7171        cb_data->second->validate_functions.push_back(function);
7172
7173        skipCall |= addCmd(dev_data, cb_data->second, CMD_BLITIMAGE, "vkCmdBlitImage()");
7174        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdBlitImage");
7175    }
7176    lock.unlock();
7177    if (!skipCall)
7178        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7179                                                      regionCount, pRegions, filter);
7180}
7181
7182VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7183                                                VkImage dstImage, VkImageLayout dstImageLayout,
7184                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7185    bool skipCall = false;
7186    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7187    std::unique_lock<std::mutex> lock(global_lock);
7188    VkDeviceMemory dst_mem, src_mem;
7189    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7190    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBufferToImage");
7191
7192    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
7193    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBufferToImage");
7194    // Validate that src buff & dst image have correct usage flags set
7195    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBufferToImage()",
7196                                            "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7197    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBufferToImage()",
7198                                           "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7199    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7200    if (cb_data != dev_data->commandBufferMap.end()) {
7201        std::function<bool()> function = [=]() {
7202            set_memory_valid(dev_data, dst_mem, true, dstImage);
7203            return false;
7204        };
7205        cb_data->second->validate_functions.push_back(function);
7206        function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBufferToImage()"); };
7207        cb_data->second->validate_functions.push_back(function);
7208
7209        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7210        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBufferToImage");
7211        for (uint32_t i = 0; i < regionCount; ++i) {
7212            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7213        }
7214    }
7215    lock.unlock();
7216    if (!skipCall)
7217        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7218                                                              pRegions);
7219}
7220
7221VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7222                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7223                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7224    bool skipCall = false;
7225    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7226    std::unique_lock<std::mutex> lock(global_lock);
7227    VkDeviceMemory src_mem, dst_mem;
7228    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7229    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImageToBuffer");
7230
7231    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
7232    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImageToBuffer");
7233    // Validate that dst buff & src image have correct usage flags set
7234    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImageToBuffer()",
7235                                           "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7236    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImageToBuffer()",
7237                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7238
7239    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7240    if (cb_data != dev_data->commandBufferMap.end()) {
7241        std::function<bool()> function = [=]() {
7242            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImageToBuffer()", srcImage);
7243        };
7244        cb_data->second->validate_functions.push_back(function);
7245        function = [=]() {
7246            set_memory_valid(dev_data, dst_mem, true);
7247            return false;
7248        };
7249        cb_data->second->validate_functions.push_back(function);
7250
7251        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7252        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImageToBuffer");
7253        for (uint32_t i = 0; i < regionCount; ++i) {
7254            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7255        }
7256    }
7257    lock.unlock();
7258    if (!skipCall)
7259        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7260                                                              pRegions);
7261}
7262
7263VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7264                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7265    bool skipCall = false;
7266    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7267    std::unique_lock<std::mutex> lock(global_lock);
7268    VkDeviceMemory mem;
7269    skipCall =
7270        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7271    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7272    // Validate that dst buff has correct usage flags set
7273    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdUpdateBuffer()",
7274                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7275
7276    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7277    if (cb_data != dev_data->commandBufferMap.end()) {
7278        std::function<bool()> function = [=]() {
7279            set_memory_valid(dev_data, mem, true);
7280            return false;
7281        };
7282        cb_data->second->validate_functions.push_back(function);
7283
7284        skipCall |= addCmd(dev_data, cb_data->second, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7285        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdUpdateBuffer");
7286    }
7287    lock.unlock();
7288    if (!skipCall)
7289        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7290}
7291
7292VKAPI_ATTR void VKAPI_CALL
7293CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7294    bool skipCall = false;
7295    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7296    std::unique_lock<std::mutex> lock(global_lock);
7297    VkDeviceMemory mem;
7298    skipCall =
7299        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7300    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7301    // Validate that dst buff has correct usage flags set
7302    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
7303                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7304
7305    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7306    if (cb_data != dev_data->commandBufferMap.end()) {
7307        std::function<bool()> function = [=]() {
7308            set_memory_valid(dev_data, mem, true);
7309            return false;
7310        };
7311        cb_data->second->validate_functions.push_back(function);
7312
7313        skipCall |= addCmd(dev_data, cb_data->second, CMD_FILLBUFFER, "vkCmdFillBuffer()");
7314        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdFillBuffer");
7315    }
7316    lock.unlock();
7317    if (!skipCall)
7318        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7319}
7320
7321VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7322                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
7323                                               const VkClearRect *pRects) {
7324    bool skipCall = false;
7325    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7326    std::unique_lock<std::mutex> lock(global_lock);
7327    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7328    if (pCB) {
7329        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7330        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
7331        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
7332            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7333            // There are times where an app needs to use ClearAttachments (generally when reusing a buffer
7334            // inside of a render pass), so this cannot be an error.
7335            // TODO : Make this check more specific so it does not trigger for uses that must call
7336            // CmdClearAttachments; until then it is reported as a performance warning.
7337            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7338                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7339                                DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7340                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7341                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7342                                (uint64_t)(commandBuffer));
7343        }
7344        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7345    }
7346
7347    // Validate that attachment is in reference list of active subpass
7348    if (pCB->activeRenderPass) {
7349        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
7350        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7351
7352        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7353            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7354            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7355                bool found = false;
7356                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7357                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7358                        found = true;
7359                        break;
7360                    }
7361                }
7362                if (!found) {
7363                    skipCall |= log_msg(
7364                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7365                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7366                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7367                        attachment->colorAttachment, pCB->activeSubpass);
7368                }
7369            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
7370                if (!pSD->pDepthStencilAttachment ||
7371                    (pSD->pDepthStencilAttachment->attachment ==
7372                     VK_ATTACHMENT_UNUSED)) { // Either case means no DS will be used in active subpass
7373
7374                    skipCall |= log_msg(
7375                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7376                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7377                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
7378                        "in active subpass %d",
7379                        attachment->colorAttachment,
7380                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7381                        pCB->activeSubpass);
7382                }
7383            }
7384        }
7385    }
7386    lock.unlock();
7387    if (!skipCall)
7388        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7389}
7390
7391VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7392                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
7393                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7394    bool skipCall = false;
7395    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7396    std::unique_lock<std::mutex> lock(global_lock);
7397    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7398    VkDeviceMemory mem;
7399    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7400    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7401    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7402    if (cb_data != dev_data->commandBufferMap.end()) {
7403        std::function<bool()> function = [=]() {
7404            set_memory_valid(dev_data, mem, true, image);
7405            return false;
7406        };
7407        cb_data->second->validate_functions.push_back(function);
7408
7409        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7410        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearColorImage");
7411    }
7412    lock.unlock();
7413    if (!skipCall)
7414        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7415}
7416
7417VKAPI_ATTR void VKAPI_CALL
7418CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7419                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7420                          const VkImageSubresourceRange *pRanges) {
7421    bool skipCall = false;
7422    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7423    std::unique_lock<std::mutex> lock(global_lock);
7424    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7425    VkDeviceMemory mem;
7426    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7427    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7428    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7429    if (cb_data != dev_data->commandBufferMap.end()) {
7430        std::function<bool()> function = [=]() {
7431            set_memory_valid(dev_data, mem, true, image);
7432            return false;
7433        };
7434        cb_data->second->validate_functions.push_back(function);
7435
7436        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7437        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearDepthStencilImage");
7438    }
7439    lock.unlock();
7440    if (!skipCall)
7441        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7442                                                                   pRanges);
7443}
7444
7445VKAPI_ATTR void VKAPI_CALL
7446CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7447                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
7448    bool skipCall = false;
7449    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7450    std::unique_lock<std::mutex> lock(global_lock);
7451    VkDeviceMemory src_mem, dst_mem;
7452    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7453    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdResolveImage");
7454
7455    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7456    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdResolveImage");
7457    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7458    if (cb_data != dev_data->commandBufferMap.end()) {
7459        std::function<bool()> function = [=]() {
7460            return validate_memory_is_valid(dev_data, src_mem, "vkCmdResolveImage()", srcImage);
7461        };
7462        cb_data->second->validate_functions.push_back(function);
7463        function = [=]() {
7464            set_memory_valid(dev_data, dst_mem, true, dstImage);
7465            return false;
7466        };
7467        cb_data->second->validate_functions.push_back(function);
7468
7469        skipCall |= addCmd(dev_data, cb_data->second, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
7470        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdResolveImage");
7471    }
7472    lock.unlock();
7473    if (!skipCall)
7474        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7475                                                         regionCount, pRegions);
7476}
7477
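// Submit-time callback: records the stage mask most recently signaled for 'event', both on the
// command buffer and on the queue it was submitted to. Always returns false (never skips the call).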
7478bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7479    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7480    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7481    if (pCB) {
7482        pCB->eventToStageMap[event] = stageMask;
7483    }
7484    auto queue_data = dev_data->queueMap.find(queue);
7485    if (queue_data != dev_data->queueMap.end()) {
7486        queue_data->second.eventToStageMap[event] = stageMask;
7487    }
7488    return false;
7489}
7490
7491VKAPI_ATTR void VKAPI_CALL
7492CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7493    bool skipCall = false;
7494    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7495    std::unique_lock<std::mutex> lock(global_lock);
7496    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7497    if (pCB) {
7498        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7499        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
7500        pCB->events.push_back(event);
7501        if (!pCB->waitedEvents.count(event)) {
7502            pCB->writeEventsBeforeWait.push_back(event);
7503        }
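        // Defer recording of the event's stage mask until submit time, when the target queue is known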
7504        std::function<bool(VkQueue)> eventUpdate =
7505            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7506        pCB->eventUpdates.push_back(eventUpdate);
7507    }
7508    lock.unlock();
7509    if (!skipCall)
7510        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
7511}
7512
7513VKAPI_ATTR void VKAPI_CALL
7514CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7515    bool skipCall = false;
7516    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7517    std::unique_lock<std::mutex> lock(global_lock);
7518    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7519    if (pCB) {
7520        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7521        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
7522        pCB->events.push_back(event);
7523        if (!pCB->waitedEvents.count(event)) {
7524            pCB->writeEventsBeforeWait.push_back(event);
7525        }
7526        std::function<bool(VkQueue)> eventUpdate =
7527            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
7528        pCB->eventUpdates.push_back(eventUpdate);
7529    }
7530    lock.unlock();
7531    if (!skipCall)
7532        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
7533}
7534
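// Record the layout transition requested by each image barrier for every affected subresource in
// the command buffer's layout map, and flag any transition whose oldLayout does not match the
// layout currently tracked for that subresource.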
7535static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7536                                   const VkImageMemoryBarrier *pImgMemBarriers) {
7537    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7538    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7539    bool skip = false;
7540    uint32_t levelCount = 0;
7541    uint32_t layerCount = 0;
7542
7543    for (uint32_t i = 0; i < memBarrierCount; ++i) {
7544        auto mem_barrier = &pImgMemBarriers[i];
7545        if (!mem_barrier)
7546            continue;
7547        // TODO: Do not iterate over every possibility - consolidate where
7548        // possible
7549        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
7550
7551        for (uint32_t j = 0; j < levelCount; j++) {
7552            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
7553            for (uint32_t k = 0; k < layerCount; k++) {
7554                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
7555                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
7556                IMAGE_CMD_BUF_LAYOUT_NODE node;
7557                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
7558                    SetLayout(pCB, mem_barrier->image, sub,
7559                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
7560                    continue;
7561                }
7562                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7563                    // TODO: Set memory invalid which is in mem_tracker currently
7564                } else if (node.layout != mem_barrier->oldLayout) {
7565                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7566                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
7567                                                                                    "when current layout is %s.",
7568                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
7569                }
7570                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
7571            }
7572        }
7573    }
7574    return skip;
7575}
7576
7577// Print readable FlagBits in FlagMask
7578static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
7579    std::string result;
7580    std::string separator;
7581
7582    if (accessMask == 0) {
7583        result = "[None]";
7584    } else {
7585        result = "[";
7586        for (auto i = 0; i < 32; i++) {
7587            if (accessMask & (1 << i)) {
7588                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
7589                separator = " | ";
7590            }
7591        }
7592        result = result + "]";
7593    }
7594    return result;
7595}
7596
7597// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
7598// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
7599// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
7600static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7601                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
7602                             const char *type) {
7603    bool skip_call = false;
7604
7605    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
7606        if (accessMask & ~(required_bit | optional_bits)) {
7607            // TODO: Verify against Valid Use
7608            skip_call |=
7609                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7610                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7611                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7612        }
7613    } else {
7614        if (!required_bit) {
7615            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7616                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
7617                                                                  "%s when layout is %s, unless the app has previously added a "
7618                                                                  "barrier for this transition.",
7619                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
7620                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
7621        } else {
7622            std::string opt_bits;
7623            if (optional_bits != 0) {
7624                std::stringstream ss;
7625                ss << optional_bits;
7626                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
7627            }
7628            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7629                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
7630                                                                  "layout is %s, unless the app has previously added a barrier for "
7631                                                                  "this transition.",
7632                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
7633                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
7634        }
7635    }
7636    return skip_call;
7637}
7638
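// Map each image layout to the access bits it requires/permits and check accessMask against them.
// For example, a transition to TRANSFER_DST_OPTIMAL is expected to use VK_ACCESS_TRANSFER_WRITE_BIT,
// while a transition from UNDEFINED is expected to use an accessMask of 0.
// VK_IMAGE_LAYOUT_GENERAL (and any unhandled layout) is intentionally not validated.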
7639static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7640                                        const VkImageLayout &layout, const char *type) {
7641    bool skip_call = false;
7642    switch (layout) {
7643    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
7644        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
7645                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
7646        break;
7647    }
7648    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
7649        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
7650                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
7651        break;
7652    }
7653    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
7654        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
7655        break;
7656    }
7657    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
7658        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
7659        break;
7660    }
7661    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
7662        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7663                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7664        break;
7665    }
7666    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
7667        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7668                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7669        break;
7670    }
7671    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
7672        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
7673        break;
7674    }
7675    case VK_IMAGE_LAYOUT_UNDEFINED: {
7676        if (accessMask != 0) {
7677            // TODO: Verify against Valid Use section spec
7678            skip_call |=
7679                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7680                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7681                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7682        }
7683        break;
7684    }
7685    case VK_IMAGE_LAYOUT_GENERAL:
7686    default: { break; }
7687    }
7688    return skip_call;
7689}
7690
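// Validate the memory, buffer, and image barriers recorded by vkCmdWaitEvents/vkCmdPipelineBarrier:
// barriers inside a render pass require a subpass self-dependency, queue family indices must agree
// with the resource's sharingMode, access masks must match the old/new image layouts, and
// buffer offsets/sizes and image subresource ranges must lie within the resource.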
7691static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7692                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7693                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7694                             const VkImageMemoryBarrier *pImageMemBarriers) {
7695    bool skip_call = false;
7696    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7697    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7698    if (pCB && pCB->activeRenderPass && memBarrierCount) {
7699        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
7700            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7701                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
7702                                                                  "with no self dependency specified.",
7703                                 funcName, pCB->activeSubpass);
7704        }
7705    }
7706    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7707        auto mem_barrier = &pImageMemBarriers[i];
7708        auto image_data = getImageNode(dev_data, mem_barrier->image);
7709        if (image_data) {
7710            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
7711            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
7712            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
7713                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
7714                // be VK_QUEUE_FAMILY_IGNORED
7715                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
7716                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7717                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7718                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
7719                                         "VK_SHARING_MODE_CONCURRENT. Src and dst"
7720                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
7721                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7722                }
7723            } else {
7724                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
7725                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
7726                // or both be a valid queue family
7727                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
7728                    (src_q_f_index != dst_q_f_index)) {
7729                    skip_call |=
7730                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7731                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
7732                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
7733                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
7734                                                                     "must be.",
7735                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7736                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
7737                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7738                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
7739                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7740                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7741                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
7742                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
7743                                         " or dstQueueFamilyIndex %d is greater than the " PRINTF_SIZE_T_SPECIFIER
7744                                         " queueFamilies created for this device.",
7745                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
7746                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
7747                }
7748            }
7749        }
7750
7751        if (mem_barrier) {
7752            skip_call |=
7753                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
7754            skip_call |=
7755                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
7756            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7757                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7758                        __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
7759                                                         "PREINITIALIZED.",
7760                        funcName);
7761            }
7762            auto image_data = getImageNode(dev_data, mem_barrier->image);
7763            VkFormat format = VK_FORMAT_UNDEFINED;
7764            uint32_t arrayLayers = 0, mipLevels = 0;
7765            bool imageFound = false;
7766            if (image_data) {
7767                format = image_data->createInfo.format;
7768                arrayLayers = image_data->createInfo.arrayLayers;
7769                mipLevels = image_data->createInfo.mipLevels;
7770                imageFound = true;
7771            } else if (dev_data->device_extensions.wsi_enabled) {
7772                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
7773                if (imageswap_data) {
7774                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
7775                    if (swapchain_data) {
7776                        format = swapchain_data->createInfo.imageFormat;
7777                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
7778                        mipLevels = 1;
7779                        imageFound = true;
7780                    }
7781                }
7782            }
7783            if (imageFound) {
7784                if (vk_format_is_depth_and_stencil(format) &&
7785                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
7786                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
7787                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7788                            __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
7789                                                             "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
7790                                                             "VK_IMAGE_ASPECT_STENCIL_BIT set.",
7791                            funcName);
7792                }
7793                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
7794                                     ? 1
7795                                     : mem_barrier->subresourceRange.layerCount;
7796                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
7797                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7798                            __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
7799                                                             "baseArrayLayer (%d) and layerCount (%d) be less "
7800                                                             "than or equal to the total number of layers (%d).",
7801                            funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
7802                            arrayLayers);
7803                }
7804                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
7805                                     ? 1
7806                                     : mem_barrier->subresourceRange.levelCount;
7807                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
7808                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7809                            __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
7810                                                             "(%d) and levelCount (%d) be less than or equal to "
7811                                                             "the total number of levels (%d).",
7812                            funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
7813                            mipLevels);
7814                }
7815            }
7816        }
7817    }
7818    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7819        auto mem_barrier = &pBufferMemBarriers[i];
7820        if (pCB && pCB->activeRenderPass) {
7821            skip_call |=
7822                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7823                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
7824        }
7825        if (!mem_barrier)
7826            continue;
7827
7828        // Validate buffer barrier queue family indices
7829        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7830             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7831            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7832             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
7833            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7834                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7835                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
7836                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
7837                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7838                                 dev_data->phys_dev_properties.queue_family_properties.size());
7839        }
7840
7841        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
7842        if (buffer_node) {
7843            VkDeviceSize buffer_size =
7844                (buffer_node->createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO) ? buffer_node->createInfo.size : 0;
7845            if (mem_barrier->offset >= buffer_size) {
7846                skip_call |= log_msg(
7847                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7848                    DRAWSTATE_INVALID_BARRIER, "DS",
7849                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7850                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7851                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
7852            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7853                skip_call |= log_msg(
7854                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7855                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7856                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
7857                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7858                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
7859                    reinterpret_cast<const uint64_t &>(buffer_size));
7860            }
7861        }
7862    }
7863    return skip_call;
7864}
7865
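// Submit-time callback for vkCmdWaitEvents: srcStageMask must equal the bitwise OR of the stageMask
// values used to set the waited events (HOST_BIT is additionally allowed for events set with
// vkSetEvent). For example, events set with TOP_OF_PIPE and TRANSFER must be waited on with
// srcStageMask == (TOP_OF_PIPE | TRANSFER).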
7866bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
7867    bool skip_call = false;
7868    VkPipelineStageFlags stageMask = 0;
7869    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
7870    for (uint32_t i = 0; i < eventCount; ++i) {
7871        auto event = pCB->events[firstEventIndex + i];
7872        auto queue_data = dev_data->queueMap.find(queue);
7873        if (queue_data == dev_data->queueMap.end())
7874            return false;
7875        auto event_data = queue_data->second.eventToStageMap.find(event);
7876        if (event_data != queue_data->second.eventToStageMap.end()) {
7877            stageMask |= event_data->second;
7878        } else {
7879            auto global_event_data = dev_data->eventMap.find(event);
7880            if (global_event_data == dev_data->eventMap.end()) {
7881                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7882                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
7883                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
7884                                     reinterpret_cast<const uint64_t &>(event));
7885            } else {
7886                stageMask |= global_event_data->second.stageMask;
7887            }
7888        }
7889    }
7890    // TODO: Need to validate that host_bit is only set if set event is called
7891    // but set event can be called at any time.
7892    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
7893        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7894                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
7895                                                            "using srcStageMask 0x%X, which must be the bitwise "
7896                                                            "OR of the stageMask parameters used in calls to "
7897                                                            "vkCmdSetEvent (plus VK_PIPELINE_STAGE_HOST_BIT if "
7898                                                            "the event was set with vkSetEvent); the expected mask is 0x%X.",
7899                             sourceStageMask, stageMask);
7900    }
7901    return skip_call;
7902}
7903
7904VKAPI_ATTR void VKAPI_CALL
7905CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
7906              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7907              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7908              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7909    bool skipCall = false;
7910    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7911    std::unique_lock<std::mutex> lock(global_lock);
7912    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7913    if (pCB) {
7914        auto firstEventIndex = pCB->events.size();
7915        for (uint32_t i = 0; i < eventCount; ++i) {
7916            pCB->waitedEvents.insert(pEvents[i]);
7917            pCB->events.push_back(pEvents[i]);
7918        }
7919        std::function<bool(VkQueue)> eventUpdate =
7920            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
7921        pCB->eventUpdates.push_back(eventUpdate);
7922        if (pCB->state == CB_RECORDING) {
7923            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
7924        } else {
7925            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
7926        }
7927        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7928        skipCall |=
7929            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7930                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7931    }
7932    lock.unlock();
7933    if (!skipCall)
7934        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
7935                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7936                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7937}
7938
7939VKAPI_ATTR void VKAPI_CALL
7940CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
7941                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7942                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7943                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7944    bool skipCall = false;
7945    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7946    std::unique_lock<std::mutex> lock(global_lock);
7947    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7948    if (pCB) {
7949        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
7950        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7951        skipCall |=
7952            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7953                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7954    }
7955    lock.unlock();
7956    if (!skipCall)
7957        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
7958                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7959                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7960}
7961
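// Submit-time callback: marks 'object' as available (true) or reset (false) in both the command
// buffer's and the queue's query state maps. Always returns false (never skips the call).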
7962bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
7963    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7964    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7965    if (pCB) {
7966        pCB->queryToStateMap[object] = value;
7967    }
7968    auto queue_data = dev_data->queueMap.find(queue);
7969    if (queue_data != dev_data->queueMap.end()) {
7970        queue_data->second.queryToStateMap[object] = value;
7971    }
7972    return false;
7973}
7974
7975VKAPI_ATTR void VKAPI_CALL
7976CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
7977    bool skipCall = false;
7978    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7979    std::unique_lock<std::mutex> lock(global_lock);
7980    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7981    if (pCB) {
7982        QueryObject query = {queryPool, slot};
7983        pCB->activeQueries.insert(query);
7984        if (!pCB->startedQueries.count(query)) {
7985            pCB->startedQueries.insert(query);
7986        }
7987        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
7988    }
7989    lock.unlock();
7990    if (!skipCall)
7991        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
7992}
7993
7994VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
7995    bool skipCall = false;
7996    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7997    std::unique_lock<std::mutex> lock(global_lock);
7998    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7999    if (pCB) {
8000        QueryObject query = {queryPool, slot};
8001        if (!pCB->activeQueries.count(query)) {
8002            skipCall |=
8003                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8004                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
8005                        (uint64_t)(queryPool), slot);
8006        } else {
8007            pCB->activeQueries.erase(query);
8008        }
8009        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8010        pCB->queryUpdates.push_back(queryUpdate);
8011        if (pCB->state == CB_RECORDING) {
8012            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8013        } else {
8014            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8015        }
8016    }
8017    lock.unlock();
8018    if (!skipCall)
8019        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8020}
8021
8022VKAPI_ATTR void VKAPI_CALL
8023CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8024    bool skipCall = false;
8025    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8026    std::unique_lock<std::mutex> lock(global_lock);
8027    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8028    if (pCB) {
8029        for (uint32_t i = 0; i < queryCount; i++) {
8030            QueryObject query = {queryPool, firstQuery + i};
8031            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8032            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
8033            pCB->queryUpdates.push_back(queryUpdate);
8034        }
8035        if (pCB->state == CB_RECORDING) {
8036            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8037        } else {
8038            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8039        }
8040        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8041    }
8042    lock.unlock();
8043    if (!skipCall)
8044        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8045}
8046
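// Submit-time callback for vkCmdCopyQueryPoolResults: each copied query must have been marked
// available on this queue (or globally); requesting results from a reset or never-written query
// is reported as an error.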
8047bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
8048    bool skip_call = false;
8049    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8050    auto queue_data = dev_data->queueMap.find(queue);
8051    if (queue_data == dev_data->queueMap.end())
8052        return false;
8053    for (uint32_t i = 0; i < queryCount; i++) {
8054        QueryObject query = {queryPool, firstQuery + i};
8055        auto query_data = queue_data->second.queryToStateMap.find(query);
8056        bool fail = false;
8057        if (query_data != queue_data->second.queryToStateMap.end()) {
8058            if (!query_data->second) {
8059                fail = true;
8060            }
8061        } else {
8062            auto global_query_data = dev_data->queryToStateMap.find(query);
8063            if (global_query_data != dev_data->queryToStateMap.end()) {
8064                if (!global_query_data->second) {
8065                    fail = true;
8066                }
8067            } else {
8068                fail = true;
8069            }
8070        }
8071        if (fail) {
8072            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8073                                 DRAWSTATE_INVALID_QUERY, "DS",
8074                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8075                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
8076        }
8077    }
8078    return skip_call;
8079}
8080
8081VKAPI_ATTR void VKAPI_CALL
8082CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8083                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8084    bool skipCall = false;
8085    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8086    std::unique_lock<std::mutex> lock(global_lock);
8087    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8088#if MTMERGESOURCE
8089    VkDeviceMemory mem;
8090    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8091    skipCall |=
8092        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8093    if (cb_data != dev_data->commandBufferMap.end()) {
8094        std::function<bool()> function = [=]() {
8095            set_memory_valid(dev_data, mem, true);
8096            return false;
8097        };
8098        cb_data->second->validate_functions.push_back(function);
8099    }
8100    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8101    // Validate that DST buffer has correct usage flags set
8102    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8103                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8104#endif
8105    if (pCB) {
8106        std::function<bool(VkQueue)> queryUpdate =
8107            std::bind(validateQuery, std::placeholders::_1, pCB, queryPool, queryCount, firstQuery);
8108        pCB->queryUpdates.push_back(queryUpdate);
8109        if (pCB->state == CB_RECORDING) {
8110            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8111        } else {
8112            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8113        }
8114        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8115    }
8116    lock.unlock();
8117    if (!skipCall)
8118        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8119                                                                 dstOffset, stride, flags);
8120}
8121
8122VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8123                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8124                                            const void *pValues) {
8125    bool skipCall = false;
8126    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8127    std::unique_lock<std::mutex> lock(global_lock);
8128    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8129    if (pCB) {
8130        if (pCB->state == CB_RECORDING) {
8131            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8132        } else {
8133            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8134        }
8135    }
8136    skipCall |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8137    if (0 == stageFlags) {
8138        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8139                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
8140    }
8141
8142    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
8143    auto pipeline_layout = getPipelineLayout(dev_data, layout);
8144    if (!pipeline_layout) {
8145        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8146                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Pipeline Layout 0x%" PRIx64 " not found.",
8147                            (uint64_t)layout);
8148    } else {
8149        // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
8150        // contained in the pipeline ranges.
8151        // Build a {start, end} span list for ranges with matching stage flags.
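        // Example with hypothetical values: layout ranges {offset 0, size 16} and {offset 8, size 24} with
        // matching stageFlags coalesce into the span [0, 32), so an update at offset 4 with size 20 is accepted.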
8152        const auto &ranges = pipeline_layout->pushConstantRanges;
8153        struct span {
8154            uint32_t start;
8155            uint32_t end;
8156        };
8157        std::vector<span> spans;
8158        spans.reserve(ranges.size());
8159        for (const auto &iter : ranges) {
8160            if (iter.stageFlags == stageFlags) {
8161                spans.push_back({iter.offset, iter.offset + iter.size});
8162            }
8163        }
8164        if (spans.size() == 0) {
8165            // There were no ranges that matched the stageFlags.
8166            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8167                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8168                                "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
8169                                "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
8170                                (uint32_t)stageFlags, (uint64_t)layout);
8171        } else {
8172            // Sort span list by start value.
8173            struct comparer {
8174                bool operator()(struct span i, struct span j) { return i.start < j.start; }
8175            } my_comparer;
8176            std::sort(spans.begin(), spans.end(), my_comparer);
8177
8178            // Examine two spans at a time.
8179            std::vector<span>::iterator current = spans.begin();
8180            std::vector<span>::iterator next = current + 1;
8181            while (next != spans.end()) {
8182                if (current->end < next->start) {
8183                    // There is a gap; cannot coalesce. Move to the next two spans.
8184                    ++current;
8185                    ++next;
8186                } else {
8187                    // Coalesce the two spans.  The start of the next span
8188                    // is within the current span, so pick the larger of
8189                    // the end values to extend the current span.
8190                    // Then delete the next span and set next to the span after it.
8191                    current->end = max(current->end, next->end);
8192                    next = spans.erase(next);
8193                }
8194            }
8195
8196            // Now we can check if the incoming range is within any of the spans.
8197            bool contained_in_a_range = false;
8198            for (uint32_t i = 0; i < spans.size(); ++i) {
8199                if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
8200                    contained_in_a_range = true;
8201                    break;
8202                }
8203            }
8204            if (!contained_in_a_range) {
8205                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8206                                    __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8207                                    "vkCmdPushConstants() Push constant range [%d, %d) "
8208                                    "with stageFlags = 0x%" PRIx32 " "
8209                                    "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
8210                                    offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
8211            }
8212        }
8213    }
8214    lock.unlock();
8215    if (!skipCall)
8216        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8217}
8218
8219VKAPI_ATTR void VKAPI_CALL
8220CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8221    bool skipCall = false;
8222    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8223    std::unique_lock<std::mutex> lock(global_lock);
8224    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8225    if (pCB) {
8226        QueryObject query = {queryPool, slot};
8227        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8228        pCB->queryUpdates.push_back(queryUpdate);
8229        if (pCB->state == CB_RECORDING) {
8230            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8231        } else {
8232            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8233        }
8234    }
8235    lock.unlock();
8236    if (!skipCall)
8237        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8238}
8239
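// Check that each framebuffer attachment referenced by 'attachments' was created from an image whose
// usage flags include 'usage_flag'. Returns true if a validation error was logged.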
8240static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
8241                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
8242    bool skip_call = false;
8243
8244    for (uint32_t attach = 0; attach < count; attach++) {
8245        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
8246            // Attachment counts are verified elsewhere, but prevent an invalid access
8247            if (attachments[attach].attachment < fbci->attachmentCount) {
8248                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
8249                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, *image_view);
8250                if (ivci != nullptr) {
8251                    auto image_node = getImageNode(dev_data, ivci->image);
8252                    if (image_node != nullptr) {
8253                        if ((image_node->createInfo.usage & usage_flag) == 0) {
8254                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8255                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
8256                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
8257                                                 "IMAGE_USAGE flags (%s).",
8258                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
8259                        }
8260                    }
8261                }
8262            }
8263        }
8264    }
8265    return skip_call;
8266}
8267
8268// Validate VkFramebufferCreateInfo which includes:
8269// 1. attachmentCount equals renderPass attachmentCount
8270// 2. corresponding framebuffer and renderpass attachments have matching formats
8271// 3. corresponding framebuffer and renderpass attachments have matching sample counts
8272// 4. fb attachments only have a single mip level
8273// 5. fb attachment dimensions are each at least as large as the fb
8274// 6. fb attachments use identity swizzle
8275// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8276// 8. fb dimensions are within physical device limits
8277static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8278    bool skip_call = false;
8279
8280    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
8281    if (rp_node) {
8282        const VkRenderPassCreateInfo *rpci = rp_node->pCreateInfo;
8283        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
8284            skip_call |= log_msg(
8285                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8286                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
8287                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
8288                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
8289                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8290        } else {
8291            // attachmentCounts match, so make sure corresponding attachment details line up
8292            const VkImageView *image_views = pCreateInfo->pAttachments;
8293            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8294                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, image_views[i]);
                if (ivci == nullptr) {
                    continue;  // No create-info for this view; nothing further to validate for this attachment
                }
8295                if (ivci->format != rpci->pAttachments[i].format) {
8296                    skip_call |= log_msg(
8297                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8298                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
8299                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
8300                              "the format of "
8301                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
8302                        i, string_VkFormat(ivci->format), string_VkFormat(rpci->pAttachments[i].format),
8303                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8304                }
                auto image_node = getImageNode(dev_data, ivci->image);
                if (image_node == nullptr) {
                    continue;  // No backing image data; skip the remaining checks for this attachment
                }
8305                const VkImageCreateInfo *ici = &image_node->createInfo;
8306                if (ici->samples != rpci->pAttachments[i].samples) {
8307                    skip_call |= log_msg(
8308                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8309                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
8310                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
8311                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
8312                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
8313                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8314                }
8315                // Verify that view only has a single mip level
8316                if (ivci->subresourceRange.levelCount != 1) {
8317                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
8318                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8319                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
8320                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
8321                                         i, ivci->subresourceRange.levelCount);
8322                }
8323                const uint32_t mip_level = ivci->subresourceRange.baseMipLevel;
8324                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
8325                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
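                // e.g. a 1024x768 image viewed with baseMipLevel 2 yields a 256x192 mip; the framebuffer's
                // width/height (and layers) must fit within that mip's dimensions.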
8326                if ((ivci->subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
8327                    (mip_height < pCreateInfo->height)) {
8328                    skip_call |=
8329                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8330                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8331                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
8332                                "than the corresponding "
8333                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
8334                                "dimensions for "
8335                                "attachment #%u, framebuffer:\n"
8336                                "width: %u, %u\n"
8337                                "height: %u, %u\n"
8338                                "layerCount: %u, %u\n",
8339                                i, ivci->subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
8340                                pCreateInfo->height, ivci->subresourceRange.layerCount, pCreateInfo->layers);
8341                }
8342                if (((ivci->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.r != VK_COMPONENT_SWIZZLE_R)) ||
8343                    ((ivci->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.g != VK_COMPONENT_SWIZZLE_G)) ||
8344                    ((ivci->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.b != VK_COMPONENT_SWIZZLE_B)) ||
8345                    ((ivci->components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.a != VK_COMPONENT_SWIZZLE_A))) {
8346                    skip_call |= log_msg(
8347                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8348                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8349                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
8350                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
8351                        "r swizzle = %s\n"
8352                        "g swizzle = %s\n"
8353                        "b swizzle = %s\n"
8354                        "a swizzle = %s\n",
8355                        i, string_VkComponentSwizzle(ivci->components.r), string_VkComponentSwizzle(ivci->components.g),
8356                        string_VkComponentSwizzle(ivci->components.b), string_VkComponentSwizzle(ivci->components.a));
8357                }
8358            }
8359        }
8360        // Verify correct attachment usage flags
8361        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
8362            // Verify input attachments:
8363            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
8364                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
8365            // Verify color attachments:
8366            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
8367                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
8368            // Verify depth/stencil attachments:
8369            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
8370                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
8371                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
8372            }
8373        }
8374    } else {
8375        skip_call |=
8376            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8377                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8378                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
8379                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8380    }
8381    // Verify FB dimensions are within physical device limits
8382    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
8383        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
8384        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
8385        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8386                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8387                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
8388                             "Here are the respective dimensions: requested, device max:\n"
8389                             "width: %u, %u\n"
8390                             "height: %u, %u\n"
8391                             "layerCount: %u, %u\n",
8392                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
8393                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
8394                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
8395    }
8396    return skip_call;
8397}
8398
8399// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
8400//  Returns true if an error was encountered and the debug callback requested that the call down the chain be skipped;
8401//  false indicates that the call down the chain should proceed
8402static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8403    // TODO : Verify that renderPass FB is created with is compatible with FB
8404    bool skip_call = false;
8405    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
8406    return skip_call;
8407}
8408
8409// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
8410static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
8411    // Shadow create info and store in map
8412    std::unique_ptr<FRAMEBUFFER_NODE> fb_node(
8413        new FRAMEBUFFER_NODE(pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->pCreateInfo));
8414
8415    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8416        VkImageView view = pCreateInfo->pAttachments[i];
8417        auto view_data = getImageViewData(dev_data, view);
8418        if (!view_data) {
8419            continue;
8420        }
8421        MT_FB_ATTACHMENT_INFO fb_info;
8422        get_mem_binding_from_object(dev_data, (uint64_t)(view_data->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8423                                    &fb_info.mem);
8424        fb_info.image = view_data->image;
8425        fb_node->attachments.push_back(fb_info);
8426    }
8427    dev_data->frameBufferMap[fb] = std::move(fb_node);
8428}
8429
8430VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8431                                                 const VkAllocationCallbacks *pAllocator,
8432                                                 VkFramebuffer *pFramebuffer) {
8433    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8434    std::unique_lock<std::mutex> lock(global_lock);
8435    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
8436    lock.unlock();
8437
8438    if (skip_call)
8439        return VK_ERROR_VALIDATION_FAILED_EXT;
8440
8441    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8442
8443    if (VK_SUCCESS == result) {
8444        lock.lock();
8445        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
8446        lock.unlock();
8447    }
8448    return result;
8449}
8450
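// Depth-first search backwards through the subpass DAG from 'index', returning true if 'dependent' is
// reachable along prev edges (i.e. some dependency chain connects the two subpasses).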
8451static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8452                           std::unordered_set<uint32_t> &processed_nodes) {
8453    // If this node has already been checked, no dependency path was found through it, so return false.
8454    if (processed_nodes.count(index))
8455        return false;
8456    processed_nodes.insert(index);
8457    const DAGNode &node = subpass_to_node[index];
8458    // Look for a dependency path: if 'dependent' is a direct predecessor return true, else recurse on the previous nodes.
8459    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8460        for (auto elem : node.prev) {
8461            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
8462                return true;
8463        }
8464    } else {
8465        return true;
8466    }
8467    return false;
8468}
8469
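// Verify that 'subpass' has a direct or transitive dependency relationship with every other subpass in
// 'dependent_subpasses'. Logs an error for each missing relationship and returns false if any was missing.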
8470static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
8471                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
8472    bool result = true;
8473    // Loop through all subpasses that share the same attachment and make sure a dependency exists
8474    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8475        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
8476            continue;
8477        const DAGNode &node = subpass_to_node[subpass];
8478        // Check for a specified dependency between the two nodes. If one exists we are done.
8479        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8480        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8481        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8482            // If no explicit dependency exists, an implicit one still might. If neither does, report an error.
8483            std::unordered_set<uint32_t> processed_nodes;
8484            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8485                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
8486                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8487                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8488                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8489                                     dependent_subpasses[k]);
8490                result = false;
8491            }
8492        }
8493    }
8494    return result;
8495}
8496
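// Recursively walk the subpass DAG to determine whether 'attachment' is written by subpass 'index' or one of
// its predecessors; intermediate subpasses on such a path must list the attachment in pPreserveAttachments.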
8497static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8498                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
8499    const DAGNode &node = subpass_to_node[index];
8500    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
8501    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8502    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8503        if (attachment == subpass.pColorAttachments[j].attachment)
8504            return true;
8505    }
8506    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8507        if (attachment == subpass.pDepthStencilAttachment->attachment)
8508            return true;
8509    }
8510    bool result = false;
8511    // Loop through previous nodes and see if any of them write to the attachment.
8512    for (auto elem : node.prev) {
8513        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
8514    }
8515    // If the attachment was written to by a previous node, then this node needs to preserve it.
8516    if (result && depth > 0) {
8517        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8518        bool has_preserved = false;
8519        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8520            if (subpass.pPreserveAttachments[j] == attachment) {
8521                has_preserved = true;
8522                break;
8523            }
8524        }
8525        if (!has_preserved) {
8526            skip_call |=
8527                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8528                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8529                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8530        }
8531    }
8532    return result;
8533}
8534
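// Worked example for the half-open overlap test below: ranges starting at 0 (size 4) and 2 (size 4) overlap,
// while ranges starting at 0 (size 2) and 2 (size 2) merely touch and do not.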
8535template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8536    // Treat the inputs as half-open ranges [offset, offset + size): they overlap iff each begins before the other ends.
8537    return ((offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1)));
8538}
8539
8540bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8541    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8542            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8543}
8544
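// Validate that the subpass dependencies declared for 'renderPass' cover every case where one subpass reads
// or writes an attachment (or an aliasing attachment) that another subpass produces, given the actual
// attachments bound in 'framebuffer'.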
8545static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
8546                                 RENDER_PASS_NODE const * renderPass) {
8547    bool skip_call = false;
8548    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
8549    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
8550    auto const & subpass_to_node = renderPass->subpassToNode;
8551    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8552    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8553    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8554    // Find overlapping attachments
8555    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8556        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8557            VkImageView viewi = pFramebufferInfo->pAttachments[i];
8558            VkImageView viewj = pFramebufferInfo->pAttachments[j];
8559            if (viewi == viewj) {
8560                overlapping_attachments[i].push_back(j);
8561                overlapping_attachments[j].push_back(i);
8562                continue;
8563            }
8564            auto view_data_i = getImageViewData(my_data, viewi);
8565            auto view_data_j = getImageViewData(my_data, viewj);
8566            if (!view_data_i || !view_data_j) {
8567                continue;
8568            }
8569            if (view_data_i->image == view_data_j->image &&
8570                isRegionOverlapping(view_data_i->subresourceRange, view_data_j->subresourceRange)) {
8571                overlapping_attachments[i].push_back(j);
8572                overlapping_attachments[j].push_back(i);
8573                continue;
8574            }
8575            auto image_data_i = getImageNode(my_data, view_data_i->image);
8576            auto image_data_j = getImageNode(my_data, view_data_j->image);
8577            if (!image_data_i || !image_data_j) {
8578                continue;
8579            }
8580            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
8581                                                                             image_data_j->memOffset, image_data_j->memSize)) {
8582                overlapping_attachments[i].push_back(j);
8583                overlapping_attachments[j].push_back(i);
8584            }
8585        }
8586    }
8587    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8588        uint32_t attachment = i;
8589        for (auto other_attachment : overlapping_attachments[i]) {
8590            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8591                skip_call |=
8592                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8593                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8594                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8595                            attachment, other_attachment);
8596            }
8597            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8598                skip_call |=
8599                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8600                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8601                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8602                            other_attachment, attachment);
8603            }
8604        }
8605    }
8606    // For each attachment, find the subpasses that use it.
8607    unordered_set<uint32_t> attachmentIndices;
8608    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8609        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8610        attachmentIndices.clear();
8611        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8612            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8613            input_attachment_to_subpass[attachment].push_back(i);
8614            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8615                input_attachment_to_subpass[overlapping_attachment].push_back(i);
8616            }
8617        }
8618        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8619            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8620            output_attachment_to_subpass[attachment].push_back(i);
8621            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8622                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8623            }
8624            attachmentIndices.insert(attachment);
8625        }
8626        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8627            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8628            output_attachment_to_subpass[attachment].push_back(i);
8629            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8630                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8631            }
8632
8633            if (attachmentIndices.count(attachment)) {
8634                skip_call |=
8635                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8636                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8637                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
8638                            attachment, i);
8639            }
8640        }
8641    }
8642    // If there is a dependency needed make sure one exists
8643    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8644        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8645        // If the attachment is an input then all subpasses that output must have a dependency relationship
8646        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8647            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
8648            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8649        }
8650        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8651        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8652            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
8653            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8654            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8655        }
8656        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8657            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8658            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8659            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8660        }
8661    }
8662    // Check implicit dependencies: for each subpass that reads an input attachment, make sure the attachment is
8663    // preserved by every intervening subpass since it was last written.
8664    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8665        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8666        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8667            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
8668        }
8669    }
8670    return skip_call;
8671}
8672// ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
8673// VkAttachmentDescription structs used by the subpasses of a renderpass. The initial check ensures that
8674// attachments in a READ_ONLY layout don't have CLEAR as their loadOp.
8675static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
8676                                                  const uint32_t attachment,
8677                                                  const VkAttachmentDescription &attachment_description) {
8678    bool skip_call = false;
8679    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
8680    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
8681        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
8682            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
8683            skip_call |=
8684                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8685                        DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8686                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
8687        }
8688    }
8689    return skip_call;
8690}
8691
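// Check attachment reference layouts against each reference's usage in its subpass: input attachments should
// be in a READ_ONLY layout, color attachments in COLOR_ATTACHMENT_OPTIMAL, and depth/stencil attachments in
// DEPTH_STENCIL_ATTACHMENT_OPTIMAL (GENERAL is accepted with a performance warning).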
8692static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
8693    bool skip = false;
8694
8695    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8696        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8697        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8698            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
8699                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
8700                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8701                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8702                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8703                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8704                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
8705                } else {
8706                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8707                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8708                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
8709                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
8710                }
8711            }
8712            auto attach_index = subpass.pInputAttachments[j].attachment;
8713            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout, attach_index,
8714                                                          pCreateInfo->pAttachments[attach_index]);
8715        }
8716        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8717            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
8718                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8719                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8720                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8721                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8722                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
8723                } else {
8724                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8725                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8726                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
8727                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
8728                }
8729            }
8730            auto attach_index = subpass.pColorAttachments[j].attachment;
8731            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout, attach_index,
8732                                                          pCreateInfo->pAttachments[attach_index]);
8733        }
8734        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8735            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
8736                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
8737                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8738                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8739                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8740                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
8741                } else {
8742                    skip |=
8743                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8744                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8745                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
8746                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
8747                }
8748            }
8749            auto attach_index = subpass.pDepthStencilAttachment->attachment;
8750            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
8751                                                          attach_index, pCreateInfo->pAttachments[attach_index]);
8752        }
8753    }
8754    return skip;
8755}
8756
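// Build a DAG over the subpasses from pCreateInfo->pDependencies (prev/next edges per node), flagging
// self-dependencies, and reject dependencies that run from a later subpass to an earlier one or that are
// external on both ends.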
8757static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8758                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
8759    bool skip_call = false;
8760    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8761        DAGNode &subpass_node = subpass_to_node[i];
8762        subpass_node.pass = i;
8763    }
8764    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8765        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8766        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
8767            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8768            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8769                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
8770                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
8771        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8772            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8773                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
8774        } else if (dependency.srcSubpass == dependency.dstSubpass) {
8775            has_self_dependency[dependency.srcSubpass] = true;
8776        }
8777        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8778            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8779        }
8780        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
8781            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8782        }
8783    }
8784    return skip_call;
8785}
8786
8787
8788VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8789                                                  const VkAllocationCallbacks *pAllocator,
8790                                                  VkShaderModule *pShaderModule) {
8791    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8792    bool skip_call = false;
8793
8794    /* Use the SPIRV-Tools validator to try to catch any issues with the module itself */
8795    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
8796    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
8797    spv_diagnostic diag = nullptr;
8798
8799    auto result = spvValidate(ctx, &binary, &diag);
8800    if (result != SPV_SUCCESS) {
8801        skip_call |= log_msg(my_data->report_data,
8802                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
8803                             VkDebugReportObjectTypeEXT(0), 0,
8804                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
8805                             diag && diag->error ? diag->error : "(no error text)");
8806    }
8807
8808    spvDiagnosticDestroy(diag);
8809    spvContextDestroy(ctx);
8810
8811    if (skip_call)
8812        return VK_ERROR_VALIDATION_FAILED_EXT;
8813
8814    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8815
8816    if (res == VK_SUCCESS) {
8817        std::lock_guard<std::mutex> lock(global_lock);
8818        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
8819    }
8820    return res;
8821}
8822
8823static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
8824    bool skip_call = false;
8825    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
8826        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8827                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
8828                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
8829                             type, attachment, attachment_count);
8830    }
8831    return skip_call;
8832}
8833
8834static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
8835    bool skip_call = false;
8836    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8837        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8838        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8839            skip_call |=
8840                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8841                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8842                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
8843        }
8844        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8845            uint32_t attachment = subpass.pPreserveAttachments[j];
8846            if (attachment == VK_ATTACHMENT_UNUSED) {
8847                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8848                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
8849                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
8850            } else {
8851                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
8852            }
8853        }
8854        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8855            uint32_t attachment;
8856            if (subpass.pResolveAttachments) {
8857                attachment = subpass.pResolveAttachments[j].attachment;
8858                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
8859            }
8860            attachment = subpass.pColorAttachments[j].attachment;
8861            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
8862        }
8863        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8864            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8865            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
8866        }
8867        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8868            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8869            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
8870        }
8871    }
8872    return skip_call;
8873}
8874
8875VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8876                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
8877    bool skip_call = false;
8878    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8879
8880    std::unique_lock<std::mutex> lock(global_lock);
8881
8882    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
8883    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
8884    //       ValidateLayouts.
8885    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
8886    lock.unlock();
8887
8888    if (skip_call) {
8889        return VK_ERROR_VALIDATION_FAILED_EXT;
8890    }
8891
8892    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
8893
8894    if (VK_SUCCESS == result) {
8895        lock.lock();
8896
8897        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8898        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8899        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
8900
8901        // Shadow create info and store in map
8902        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
8903        if (pCreateInfo->pAttachments) {
8904            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
8905            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
8906                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
8907        }
8908        if (pCreateInfo->pSubpasses) {
8909            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
8910            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
8911
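            // Deep-copy each subpass's attachment reference arrays into one allocation per subpass, packed in
            // the order: input, color, resolve (if present), depth/stencil (if present), preserve.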
8912            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
8913                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
8914                const uint32_t attachmentCount = subpass->inputAttachmentCount +
8915                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
8916                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
8917                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
8918
8919                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
8920                subpass->pInputAttachments = attachments;
8921                attachments += subpass->inputAttachmentCount;
8922
8923                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8924                subpass->pColorAttachments = attachments;
8925                attachments += subpass->colorAttachmentCount;
8926
8927                if (subpass->pResolveAttachments) {
8928                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8929                    subpass->pResolveAttachments = attachments;
8930                    attachments += subpass->colorAttachmentCount;
8931                }
8932
8933                if (subpass->pDepthStencilAttachment) {
8934                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
8935                    subpass->pDepthStencilAttachment = attachments;
8936                    attachments += 1;
8937                }
8938
                // pPreserveAttachments is an array of uint32_t indices, not VkAttachmentReference, so copy it with
                // uint32_t stride; the VkAttachmentReference slots reserved above provide more than enough storage.
8939                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
8940                subpass->pPreserveAttachments = &attachments->attachment;
8941            }
8942        }
8943        if (pCreateInfo->pDependencies) {
8944            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
8945            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
8946                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
8947        }
8948
8949        auto render_pass = new RENDER_PASS_NODE(localRPCI);
8950        render_pass->renderPass = *pRenderPass;
8951        render_pass->hasSelfDependency = has_self_dependency;
8952        render_pass->subpassToNode = subpass_to_node;
8953#if MTMERGESOURCE
8954        // MTMTODO : Merge with code from above to eliminate duplication
8955        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8956            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
8957            MT_PASS_ATTACHMENT_INFO pass_info;
8958            pass_info.load_op = desc.loadOp;
8959            pass_info.store_op = desc.storeOp;
8960            pass_info.stencil_load_op = desc.stencilLoadOp;
8961            pass_info.stencil_store_op = desc.stencilStoreOp;
8962            pass_info.attachment = i;
8963            render_pass->attachments.push_back(pass_info);
8964        }
8965        // TODO: Maybe fill list and then copy instead of locking
8966        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
8967        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
8968        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8969            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8970            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8971                uint32_t attachment = subpass.pColorAttachments[j].attachment;
8972                if (!attachment_first_read.count(attachment)) {
8973                    attachment_first_read.insert(std::make_pair(attachment, false));
8974                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
8975                }
8976            }
8977            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8978                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8979                if (!attachment_first_read.count(attachment)) {
8980                    attachment_first_read.insert(std::make_pair(attachment, false));
8981                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
8982                }
8983            }
8984            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8985                uint32_t attachment = subpass.pInputAttachments[j].attachment;
8986                if (!attachment_first_read.count(attachment)) {
8987                    attachment_first_read.insert(std::make_pair(attachment, true));
8988                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
8989                }
8990            }
8991        }
8992#endif
8993        dev_data->renderPassMap[*pRenderPass] = render_pass;
8994    }
8995    return result;
8996}
8997
8998// Free the renderpass shadow
8999static void deleteRenderPasses(layer_data *my_data) {
9000    for (auto renderPass : my_data->renderPassMap) {
9001        const VkRenderPassCreateInfo *pRenderPassInfo = renderPass.second->pCreateInfo;
9002        delete[] pRenderPassInfo->pAttachments;
        if (pRenderPassInfo->pSubpasses) {
            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // The input/color/resolve/depth-stencil references of each subpass were
                // allocated as a single block anchored at pInputAttachments; the preserve
                // indices live in their own uint32_t allocation.
                delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
                delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
            }
            delete[] pRenderPassInfo->pSubpasses;
        }
9019        delete[] pRenderPassInfo->pDependencies;
9020        delete pRenderPassInfo;
9021        delete renderPass.second;
9022    }
9023    my_data->renderPassMap.clear();
9024}
9025
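// Check that, for every framebuffer attachment, the render pass's initialLayout matches the
// layout this command buffer currently tracks for each subresource of the attachment's image,
// seeding the tracker with initialLayout for subresources not yet seen.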
9026static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9027    bool skip_call = false;
    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
    const safe_VkFramebufferCreateInfo &framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
9030    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9031        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9032                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9033                                                                 "with a different number of attachments.");
9034    }
9035    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9036        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9037        auto image_data = getImageViewData(dev_data, image_view);
9038        assert(image_data);
9039        const VkImage &image = image_data->image;
9040        const VkImageSubresourceRange &subRange = image_data->subresourceRange;
9041        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9042                                             pRenderPassInfo->pAttachments[i].initialLayout};
9043        // TODO: Do not iterate over every possibility - consolidate where possible
9044        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9045            uint32_t level = subRange.baseMipLevel + j;
9046            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9047                uint32_t layer = subRange.baseArrayLayer + k;
9048                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9049                IMAGE_CMD_BUF_LAYOUT_NODE node;
9050                if (!FindLayout(pCB, image, sub, node)) {
9051                    SetLayout(pCB, image, sub, newNode);
9052                    continue;
9053                }
9054                if (newNode.layout != node.layout) {
9055                    skip_call |=
9056                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9057                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9058                                                                    "where the "
9059                                                                    "initial layout is %s and the layout of the attachment at the "
9060                                                                    "start of the render pass is %s. The layouts must match.",
9061                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9062                }
9063            }
9064        }
9065    }
9066    return skip_call;
9067}
9068
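// Record the implicit layout transitions performed when a subpass begins: each input, color,
// and depth/stencil attachment is moved to the layout named in its VkAttachmentReference.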
9069static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
9070                                     const int subpass_index) {
9071    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
9072    if (!renderPass)
9073        return;
9074
9075    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
9076    if (!framebuffer)
9077        return;
9078
9079    const safe_VkFramebufferCreateInfo &framebufferInfo = framebuffer->createInfo;
9080    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
9081    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9082        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9083        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9084    }
9085    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9086        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9087        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9088    }
9089    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9090        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9091        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9092    }
9093}
9094
9095static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9096    bool skip_call = false;
9097    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9098        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9099                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9100                             cmd_name.c_str());
9101    }
9102    return skip_call;
9103}
9104
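// Record the implicit transition of every attachment to its finalLayout when the render pass
// ends; these transitions happen as part of vkCmdEndRenderPass.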
9105static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9106    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
9107    if (!renderPass)
9108        return;
9109
9110    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
9111    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
9112    if (!framebuffer)
9113        return;
9114
9115    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9116        const VkImageView &image_view = framebuffer->createInfo.pAttachments[i];
9117        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9118    }
9119}
9120
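// The render area must lie entirely within the framebuffer. An illustrative (hypothetical)
// case this check catches:
//     VkRenderPassBeginInfo rp_begin = {};  // sType etc. omitted for brevity
//     rp_begin.renderArea.offset = {0, 0};
//     rp_begin.renderArea.extent = {fb_width + 1, fb_height};  // wider than the framebuffer -> error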
9121static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9122    bool skip_call = false;
9123    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &getFramebuffer(my_data, pRenderPassBegin->framebuffer)->createInfo;
9124    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9125        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9126        pRenderPassBegin->renderArea.offset.y < 0 ||
9127        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9128        skip_call |= static_cast<bool>(log_msg(
9129            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9130            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9131            "Cannot execute a render pass with renderArea not within the bound of the "
9132            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9133            "height %d.",
9134            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9135            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9136    }
9137    return skip_call;
9138}
9139
9140// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
9141// [load|store]Op flag must be checked
9142// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
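// Example: for VK_FORMAT_D24_UNORM_S8_UINT (depth and stencil) with op == VK_ATTACHMENT_LOAD_OP_CLEAR,
// this returns true when either the depth loadOp or the stencilLoadOp is CLEAR; for a stencil-only
// format only the stencil op is consulted, and for color or depth-only formats only color_depth_op.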
9143template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
9144    if (color_depth_op != op && stencil_op != op) {
9145        return false;
9146    }
9147    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
9148    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
9149
9150    return (((check_color_depth_load_op == true) && (color_depth_op == op)) ||
9151            ((check_stencil_load_op == true) && (stencil_op == op)));
9152}
9153
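// vkCmdBeginRenderPass validation: verifies that clearValueCount covers every attachment using
// VK_ATTACHMENT_LOAD_OP_CLEAR, that the render area lies within the framebuffer, that attachment
// layouts match what this command buffer has tracked, and that the command is recorded in a
// primary command buffer not already inside a render pass. Per-attachment memory validity checks
// are not evaluated here; they are queued on pCB->validate_functions for later evaluation.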
9154VKAPI_ATTR void VKAPI_CALL
9155CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9156    bool skipCall = false;
9157    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9158    std::unique_lock<std::mutex> lock(global_lock);
9159    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9160    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
9161    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
9162    if (pCB) {
9163        if (renderPass) {
9164            uint32_t clear_op_count = 0;
9165            pCB->activeFramebuffer = pRenderPassBegin->framebuffer;
9166            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
9167                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9168                VkFormat format = renderPass->pCreateInfo->pAttachments[renderPass->attachments[i].attachment].format;
9169                if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9170                                                         renderPass->attachments[i].stencil_load_op,
9171                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
9172                    ++clear_op_count;
9173                    std::function<bool()> function = [=]() {
9174                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9175                        return false;
9176                    };
9177                    pCB->validate_functions.push_back(function);
9178                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9179                                                                renderPass->attachments[i].stencil_load_op,
9180                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
9181                    std::function<bool()> function = [=]() {
9182                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9183                        return false;
9184                    };
9185                    pCB->validate_functions.push_back(function);
9186                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9187                                                                renderPass->attachments[i].stencil_load_op,
9188                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
9189                    std::function<bool()> function = [=]() {
9190                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9191                    };
9192                    pCB->validate_functions.push_back(function);
9193                }
9194                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
9195                    std::function<bool()> function = [=]() {
9196                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9197                    };
9198                    pCB->validate_functions.push_back(function);
9199                }
9200            }
9201            if (clear_op_count > pRenderPassBegin->clearValueCount) {
                // Report against the VkRenderPass handle rather than the tracking node's address
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    reinterpret_cast<uint64_t &>(renderPass->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9205                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but the actual number "
9206                    "of attachments in renderPass 0x%" PRIx64 " that use VK_ATTACHMENT_LOAD_OP_CLEAR is %u. The clearValueCount "
9207                    "must therefore be greater than or equal to %u.",
                    pRenderPassBegin->clearValueCount, reinterpret_cast<uint64_t &>(renderPass->renderPass), clear_op_count, clear_op_count);
9209            }
9210            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9211            skipCall |= VerifyFramebufferAndRenderPassLayouts(dev_data, pCB, pRenderPassBegin);
9212            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9213            skipCall |= ValidateDependencies(dev_data, framebuffer, renderPass);
9214            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9215            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9216            pCB->activeRenderPass = renderPass;
9217            // This is a shallow copy as that is all that is needed for now
9218            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9219            pCB->activeSubpass = 0;
9220            pCB->activeSubpassContents = contents;
9221            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
9222            // Connect this framebuffer to this cmdBuffer
9223            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
9224        } else {
9225            skipCall |=
9226                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9227                            DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9228        }
9229    }
9230    lock.unlock();
9231    if (!skipCall) {
9232        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9233    }
9234}
9235
9236VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9237    bool skipCall = false;
9238    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9239    std::unique_lock<std::mutex> lock(global_lock);
9240    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9241    if (pCB) {
9242        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9243        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9244        pCB->activeSubpass++;
9245        pCB->activeSubpassContents = contents;
9246        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9247        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9248    }
9249    lock.unlock();
9250    if (!skipCall)
9251        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9252}
9253
9254VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9255    bool skipCall = false;
9256    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9257    std::unique_lock<std::mutex> lock(global_lock);
9258    auto pCB = getCBNode(dev_data, commandBuffer);
9259    if (pCB) {
9260        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
9261        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
9262        if (pRPNode) {
9263            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9264                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9265                VkFormat format = pRPNode->pCreateInfo->pAttachments[pRPNode->attachments[i].attachment].format;
9266                if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9267                                                         pRPNode->attachments[i].stencil_store_op, VK_ATTACHMENT_STORE_OP_STORE)) {
9268                    std::function<bool()> function = [=]() {
9269                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9270                        return false;
9271                    };
9272                    pCB->validate_functions.push_back(function);
9273                } else if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9274                                                                pRPNode->attachments[i].stencil_store_op,
9275                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
9276                    std::function<bool()> function = [=]() {
9277                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9278                        return false;
9279                    };
9280                    pCB->validate_functions.push_back(function);
9281                }
9282            }
9283        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9285        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9286        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9287        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
9288        pCB->activeRenderPass = nullptr;
9289        pCB->activeSubpass = 0;
9290        pCB->activeFramebuffer = VK_NULL_HANDLE;
9291    }
9292    lock.unlock();
9293    if (!skipCall)
9294        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9295}
9296
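// Render pass compatibility (spec: "Render Pass Compatibility"): corresponding attachments in
// the two passes must have identical formats and sample counts, and identical flags when either
// pass contains more than one subpass; an unused attachment is only compatible with another
// unused attachment.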
9297static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9298                                        RENDER_PASS_NODE const *primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
9299                                        const char *msg) {
9300    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9301                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9302                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
9303                   " that is not compatible with the current render pass 0x%" PRIx64 "."
9304                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9305                   (void *)secondaryBuffer, (uint64_t)(secondaryPass->renderPass), (uint64_t)(primaryPass->renderPass), primaryAttach, secondaryAttach,
9306                   msg);
9307}
9308
9309static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9310                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9311                                            uint32_t secondaryAttach, bool is_multi) {
9312    bool skip_call = false;
9313    if (primaryPass->pCreateInfo->attachmentCount <= primaryAttach) {
9314        primaryAttach = VK_ATTACHMENT_UNUSED;
9315    }
9316    if (secondaryPass->pCreateInfo->attachmentCount <= secondaryAttach) {
9317        secondaryAttach = VK_ATTACHMENT_UNUSED;
9318    }
9319    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9320        return skip_call;
9321    }
9322    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9323        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9324                                                 secondaryAttach, "The first is unused while the second is not.");
9325        return skip_call;
9326    }
9327    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9328        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9329                                                 secondaryAttach, "The second is unused while the first is not.");
9330        return skip_call;
9331    }
9332    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].format !=
9333        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].format) {
9334        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9335                                                 secondaryAttach, "They have different formats.");
9336    }
9337    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].samples !=
9338        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].samples) {
9339        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9340                                                 secondaryAttach, "They have different samples.");
9341    }
9342    if (is_multi &&
9343        primaryPass->pCreateInfo->pAttachments[primaryAttach].flags !=
9344            secondaryPass->pCreateInfo->pAttachments[secondaryAttach].flags) {
9345        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9346                                                 secondaryAttach, "They have different flags.");
9347    }
9348    return skip_call;
9349}
9350
9351static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9352                                         VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass, const int subpass,
9353                                         bool is_multi) {
9354    bool skip_call = false;
9355    const VkSubpassDescription &primary_desc = primaryPass->pCreateInfo->pSubpasses[subpass];
9356    const VkSubpassDescription &secondary_desc = secondaryPass->pCreateInfo->pSubpasses[subpass];
9357    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9358    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9359        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9360        if (i < primary_desc.inputAttachmentCount) {
9361            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9362        }
9363        if (i < secondary_desc.inputAttachmentCount) {
9364            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9365        }
9366        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9367                                                     secondaryPass, secondary_input_attach, is_multi);
9368    }
9369    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9370    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9371        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9372        if (i < primary_desc.colorAttachmentCount) {
9373            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9374        }
9375        if (i < secondary_desc.colorAttachmentCount) {
9376            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9377        }
9378        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9379                                                     secondaryPass, secondary_color_attach, is_multi);
9380        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9381        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9382            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9383        }
9384        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9385            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9386        }
9387        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9388                                                     secondaryPass, secondary_resolve_attach, is_multi);
9389    }
9390    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9391    if (primary_desc.pDepthStencilAttachment) {
9392        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9393    }
9394    if (secondary_desc.pDepthStencilAttachment) {
9395        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9396    }
9397    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9398                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9399    return skip_call;
9400}
9401
9402static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9403                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9404    bool skip_call = false;
9405    // Early exit if renderPass objects are identical (and therefore compatible)
9406    if (primaryPass == secondaryPass)
9407        return skip_call;
9408    auto primary_render_pass = getRenderPass(dev_data, primaryPass);
9409    auto secondary_render_pass = getRenderPass(dev_data, secondaryPass);
9410    if (!primary_render_pass) {
9411        skip_call |=
9412            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9413                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9414                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9415                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9416        return skip_call;
9417    }
9418    if (!secondary_render_pass) {
9419        skip_call |=
9420            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9421                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9422                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9423                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9424        return skip_call;
9425    }
9426    if (primary_render_pass->pCreateInfo->subpassCount != secondary_render_pass->pCreateInfo->subpassCount) {
9427        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9428                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9429                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
9430                             " that is not compatible with the current render pass 0x%" PRIx64 "."
9431                             "They have a different number of subpasses.",
9432                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9433        return skip_call;
9434    }
9435    auto subpassCount = primary_render_pass->pCreateInfo->subpassCount;
9436    for (uint32_t i = 0; i < subpassCount; ++i) {
9437        skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primary_render_pass, secondaryBuffer,
9438                                                  secondary_render_pass, i, subpassCount > 1);
9439    }
9440    return skip_call;
9441}
9442
9443static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9444                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9445    bool skip_call = false;
9446    if (!pSubCB->beginInfo.pInheritanceInfo) {
9447        return skip_call;
9448    }
9449    VkFramebuffer primary_fb = pCB->activeFramebuffer;
9450    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9451    if (secondary_fb != VK_NULL_HANDLE) {
9452        if (primary_fb != secondary_fb) {
9453            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9454                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9455                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a framebuffer 0x%" PRIx64
9456                                 " that is not compatible with the current framebuffer 0x%" PRIx64 ".",
9457                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9458        }
9459        auto fb = getFramebuffer(dev_data, secondary_fb);
9460        if (!fb) {
9461            skip_call |=
9462                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9463                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9464                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
9465                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9466            return skip_call;
9467        }
9468        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->createInfo.renderPass,
9469                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9470    }
9471    return skip_call;
9472}
9473
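// A secondary command buffer may not start a query of a type that is already active in the
// primary, and while a pipeline-statistics query is active the secondary's inherited
// pipelineStatistics flags must be a subset of the flags enabled on the active query pool.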
9474static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9475    bool skipCall = false;
9476    unordered_set<int> activeTypes;
9477    for (auto queryObject : pCB->activeQueries) {
9478        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9479        if (queryPoolData != dev_data->queryPoolMap.end()) {
9480            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9481                pSubCB->beginInfo.pInheritanceInfo) {
9482                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9483                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9484                    skipCall |= log_msg(
9485                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9486                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9487                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9488                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics is being queried so the command "
9489                        "buffer must have all bits set on the queryPool.",
9490                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9491                }
9492            }
9493            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9494        }
9495    }
9496    for (auto queryObject : pSubCB->startedQueries) {
9497        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9498        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9499            skipCall |=
9500                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9501                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9502                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9503                        "which has invalid active query pool 0x%" PRIx64 "of type %d but a query of that type has been started on "
9504                        "secondary Cmd Buffer 0x%p.",
9505                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9506                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9507        }
9508    }
9509    return skipCall;
9510}
9511
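// vkCmdExecuteCommands validation. Inside a render pass, every executed buffer must be a
// secondary begun with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and compatible
// inheritance state. An illustrative (hypothetical) secondary begin that satisfies this:
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = render_pass;   // must be compatible with the active render pass
//     inherit.subpass = 0;
//     inherit.framebuffer = fb;           // optional; if set, must match the active framebuffer
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);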
9512VKAPI_ATTR void VKAPI_CALL
9513CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9514    bool skipCall = false;
9515    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9516    std::unique_lock<std::mutex> lock(global_lock);
9517    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9518    if (pCB) {
9519        GLOBAL_CB_NODE *pSubCB = NULL;
9520        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9521            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9522            if (!pSubCB) {
9523                skipCall |=
9524                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9525                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9526                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
9527                            (void *)pCommandBuffers[i], i);
9528            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9529                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9530                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9531                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
9532                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9533                                    (void *)pCommandBuffers[i], i);
9534            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9535                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9536                    skipCall |= log_msg(
9537                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9538                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9539                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
9540                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9541                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
9542                } else {
9543                    // Make sure render pass is compatible with parent command buffer pass if has continue
9544                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->renderPass, pCommandBuffers[i],
9545                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9546                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9547                }
9548                string errorString = "";
9549                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->renderPass,
9550                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9551                    skipCall |= log_msg(
9552                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9553                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9554                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
9555                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
9556                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9557                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
9558                }
9559                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
9560                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
9561                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9562                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9563                        skipCall |= log_msg(
9564                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9565                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9566                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) references framebuffer (0x%" PRIxLEAST64
9567                            ") that does not match framebuffer (0x%" PRIxLEAST64 ") in active renderpass (0x%" PRIxLEAST64 ").",
9568                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9569                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass->renderPass);
9570                    }
9571                }
9572            }
9573            // TODO(mlentine): Move more logic into this method
9574            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9575            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution from the moment they are recorded
9578            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9579                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9580                    skipCall |= log_msg(
9581                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9582                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9583                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9584                        "set!",
9585                        (uint64_t)(pCB->commandBuffer));
9586                }
9587                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that a secondary lacking SIMULTANEOUS_USE causes the primary to be treated as non-simultaneous
9589                    skipCall |= log_msg(
9590                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9591                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9592                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
9593                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9594                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9595                                          "set, even though it does.",
9596                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9597                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9598                }
9599            }
9600            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
9601                skipCall |=
9602                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9603                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
9604                            "vkCmdExecuteCommands(): Secondary Command Buffer "
9605                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
9606                            "flight and inherited queries not "
9607                            "supported on this device.",
9608                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
9609            }
9610            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9611            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9612            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9613        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9616    }
9617    lock.unlock();
9618    if (!skipCall)
9619        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9620}
9621
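// Memory that backs an image may only be mapped while every tracked subresource of that image
// is in VK_IMAGE_LAYOUT_GENERAL or VK_IMAGE_LAYOUT_PREINITIALIZED.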
9622static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
9623    bool skip_call = false;
9624    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9625    auto mem_info = getMemObjInfo(dev_data, mem);
9626    if ((mem_info) && (mem_info->image != VK_NULL_HANDLE)) {
9627        std::vector<VkImageLayout> layouts;
9628        if (FindLayouts(dev_data, mem_info->image, layouts)) {
9629            for (auto layout : layouts) {
9630                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
9631                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9632                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
9633                                                                                         "GENERAL or PREINITIALIZED are supported.",
9634                                         string_VkImageLayout(layout));
9635                }
9636            }
9637        }
9638    }
9639    return skip_call;
9640}
9641
9642VKAPI_ATTR VkResult VKAPI_CALL
9643MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
9644    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9645
9646    bool skip_call = false;
9647    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9648    std::unique_lock<std::mutex> lock(global_lock);
9649#if MTMERGESOURCE
9650    DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
9651    if (pMemObj) {
9652        pMemObj->valid = true;
9653        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
9654             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9655            skip_call =
9656                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9657                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
9658                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
9659        }
9660    }
9661    skip_call |= validateMemRange(dev_data, mem, offset, size);
9662#endif
9663    skip_call |= ValidateMapImageLayouts(device, mem);
9664    lock.unlock();
9665
9666    if (!skip_call) {
9667        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
9668        if (VK_SUCCESS == result) {
9669#if MTMERGESOURCE
9670            lock.lock();
9671            storeMemRanges(dev_data, mem, offset, size);
9672            initializeAndTrackMemory(dev_data, mem, size, ppData);
9673            lock.unlock();
9674#endif
9675        }
9676    }
9677    return result;
9678}
9679
9680VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9681    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9682    bool skipCall = false;
9683
9684    std::unique_lock<std::mutex> lock(global_lock);
9685    skipCall |= deleteMemRanges(my_data, mem);
9686    lock.unlock();
9687    if (!skipCall) {
9688        my_data->device_dispatch_table->UnmapMemory(device, mem);
9689    }
9690}
9691
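// Each flush/invalidate range must fall within the currently mapped range of its memory object:
// its offset must be at or above the mapped offset, and its end must not exceed the mapped end
// (the allocation size when the range was mapped with VK_WHOLE_SIZE).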
9692static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
9693                                   const VkMappedMemoryRange *pMemRanges) {
9694    bool skipCall = false;
9695    for (uint32_t i = 0; i < memRangeCount; ++i) {
9696        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
9697        if (mem_info) {
9698            if (mem_info->memRange.offset > pMemRanges[i].offset) {
9699                skipCall |=
9700                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9701                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
9702                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
9703                            "(" PRINTF_SIZE_T_SPECIFIER ").",
9704                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->memRange.offset));
9705            }
9706
            const uint64_t my_dataTerminus = (mem_info->memRange.size == VK_WHOLE_SIZE)
                                                 ? mem_info->allocInfo.allocationSize
                                                 : (mem_info->memRange.offset + mem_info->memRange.size);
9710            if (pMemRanges[i].size != VK_WHOLE_SIZE && (my_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
9711                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9712                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9713                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
9714                                                                 ") exceeds the Memory Object's upper-bound "
9715                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
9716                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9717                                    static_cast<size_t>(my_dataTerminus));
9718            }
9719        }
9720    }
9721    return skipCall;
9722}
9723
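// For non-coherent mappings the layer hands the application a shadow buffer with guard bands
// around the user range and, on flush, checks the bands for stray writes before copying the
// user bytes back to the driver's real mapping. Assumed layout (established elsewhere,
// presumably in initializeAndTrackMemory):
//     [ half_size guard | size bytes of user data | half_size guard ]
// with both guard regions filled with NoncoherentMemoryFillValue.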
9724static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
9725                                                     const VkMappedMemoryRange *pMemRanges) {
9726    bool skipCall = false;
9727    for (uint32_t i = 0; i < memRangeCount; ++i) {
9728        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
9729        if (mem_info) {
9730            if (mem_info->pData) {
9731                VkDeviceSize size = mem_info->memRange.size;
9732                VkDeviceSize half_size = (size / 2);
9733                char *data = static_cast<char *>(mem_info->pData);
                for (VkDeviceSize j = 0; j < half_size; ++j) {
9735                    if (data[j] != NoncoherentMemoryFillValue) {
9736                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9737                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9738                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9739                                            (uint64_t)pMemRanges[i].memory);
9740                    }
9741                }
9742                for (auto j = size + half_size; j < 2 * size; ++j) {
9743                    if (data[j] != NoncoherentMemoryFillValue) {
9744                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9745                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9746                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9747                                            (uint64_t)pMemRanges[i].memory);
9748                    }
9749                }
9750                memcpy(mem_info->pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
9751            }
9752        }
9753    }
9754    return skipCall;
9755}
9756
9757VkResult VKAPI_CALL
9758FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9759    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9760    bool skipCall = false;
9761    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9762
9763    std::unique_lock<std::mutex> lock(global_lock);
9764    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
9765    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
9766    lock.unlock();
9767    if (!skipCall) {
9768        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9769    }
9770    return result;
9771}
9772
9773VkResult VKAPI_CALL
9774InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9775    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9776    bool skipCall = false;
9777    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9778
9779    std::unique_lock<std::mutex> lock(global_lock);
9780    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
9781    lock.unlock();
9782    if (!skipCall) {
9783        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9784    }
9785    return result;
9786}
9787
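// BindImageMemory: record the image-to-memory binding, insert the bound range so it can be
// checked for aliasing against ranges already bound to buffers, and verify the memory type
// satisfies the image's memory requirements before calling down the chain.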
9788VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9789    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9790    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9791    bool skipCall = false;
9792    std::unique_lock<std::mutex> lock(global_lock);
9793    auto image_node = getImageNode(dev_data, image);
9794    if (image_node) {
9795        // Track objects tied to memory
9796        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
9797        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
9798        VkMemoryRequirements memRequirements;
9799        lock.unlock();
9800        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
9801        lock.lock();
9802
        // Track and validate bound memory range information. The new image range is
        // recorded in imageRanges and then checked against existing bufferRanges to
        // detect image/buffer aliasing within the same allocation.
9804        auto mem_info = getMemObjInfo(dev_data, mem);
9805        if (mem_info) {
9806            const MEMORY_RANGE range =
9807                insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, mem_info->imageRanges);
9808            skipCall |= validate_memory_range(dev_data, mem_info->bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
9809            skipCall |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
9810        }
9811
9812        print_mem_list(dev_data);
9813        lock.unlock();
9814        if (!skipCall) {
9815            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
9816            lock.lock();
9817            dev_data->memObjMap[mem].get()->image = image;
9818            image_node->mem = mem;
9819            image_node->memOffset = memoryOffset;
9820            image_node->memSize = memRequirements.size;
9821            lock.unlock();
9822        }
9823    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been destroyed?",
                reinterpret_cast<const uint64_t &>(image));
9828    }
9829    return result;
9830}
9831
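// vkSetEvent signals an event from the host. The layer flags a forward-progress
// error if a previously submitted command buffer is still writing the event
// (write_in_use), then updates per-queue stage masks before calling down.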
9832VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9833    bool skip_call = false;
9834    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9835    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9836    std::unique_lock<std::mutex> lock(global_lock);
9837    auto event_node = dev_data->eventMap.find(event);
9838    if (event_node != dev_data->eventMap.end()) {
9839        event_node->second.needsSignaled = false;
9840        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9841        if (event_node->second.write_in_use) {
9842            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9843                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9844                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
9845                                 reinterpret_cast<const uint64_t &>(event));
9846        }
9847    }
9848    lock.unlock();
9849    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9850    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9851    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9852    for (auto queue_data : dev_data->queueMap) {
9853        auto event_entry = queue_data.second.eventToStageMap.find(event);
9854        if (event_entry != queue_data.second.eventToStageMap.end()) {
9855            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9856        }
9857    }
9858    if (!skip_call)
9859        result = dev_data->device_dispatch_table->SetEvent(device, event);
9860    return result;
9861}
9862
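// vkQueueBindSparse validation mirrors vkQueueSubmit for synchronization objects:
// the fence must not already be in flight, wait semaphores must have a pending
// signal to consume, and signal semaphores must not already be signaled. In
// addition, every buffer, opaque-image, and image bind is recorded as a sparse
// memory binding for memory tracking.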
9863VKAPI_ATTR VkResult VKAPI_CALL
9864QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
9865    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9866    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9867    bool skip_call = false;
9868    std::unique_lock<std::mutex> lock(global_lock);
9869    auto pFence = getFenceNode(dev_data, fence);
9870    auto pQueue = getQueueNode(dev_data, queue);
9871
9872    // First verify that fence is not in use
9873    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
9874
9875    if (fence != VK_NULL_HANDLE) {
9876        SubmitFence(pQueue, pFence);
9877    }
9878
9879    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9880        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9881        // Track objects tied to memory
9882        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
9883            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
9884                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
9885                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9886                                           "vkQueueBindSparse"))
9887                    skip_call = true;
9888            }
9889        }
9890        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
9891            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
9892                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
9893                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9894                                           "vkQueueBindSparse"))
9895                    skip_call = true;
9896            }
9897        }
9898        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
9899            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
9900                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
9901                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9902                                           "vkQueueBindSparse"))
9903                    skip_call = true;
9904            }
9905        }
9906        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
9907            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
9908            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
9909            if (pSemaphore) {
9910                if (pSemaphore->signaled) {
9911                    pSemaphore->signaled = false;
9912                } else {
9913                    skip_call |=
9914                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9915                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9916                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
9917                                " that has no way to be signaled.",
9918                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9919                }
9920            }
9921        }
9922        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
9923            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
9924            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
9925            if (pSemaphore) {
9926                if (pSemaphore->signaled) {
                    skip_call |=
9928                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9929                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9930                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
9931                                ", but that semaphore is already signaled.",
9932                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9933                }
9934                pSemaphore->signaled = true;
9935            }
9936        }
9937    }
9938    print_mem_list(dev_data);
9939    lock.unlock();
9940
9941    if (!skip_call)
9942        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
9943
9944    return result;
9945}
9946
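// New semaphores start unsignaled; the tracked 'signaled' flag drives the queue
// forward-progress checks seen in QueueBindSparse and QueuePresentKHR.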
9947VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
9948                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
9949    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9950    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
9951    if (result == VK_SUCCESS) {
9952        std::lock_guard<std::mutex> lock(global_lock);
9953        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
9954        sNode->signaled = false;
9955        sNode->queue = VK_NULL_HANDLE;
9956        sNode->in_use.store(0);
9957    }
9958    return result;
9959}
9960
9961VKAPI_ATTR VkResult VKAPI_CALL
9962CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
9963    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9964    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
9965    if (result == VK_SUCCESS) {
9966        std::lock_guard<std::mutex> lock(global_lock);
9967        dev_data->eventMap[*pEvent].needsSignaled = false;
9968        dev_data->eventMap[*pEvent].in_use.store(0);
9969        dev_data->eventMap[*pEvent].write_in_use = 0;
9970        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
9971    }
9972    return result;
9973}
9974
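// Swapchain state is tracked in a SWAPCHAIN_NODE keyed by handle; its images are
// filled in lazily by GetSwapchainImagesKHR below.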
9975VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
9976                                                  const VkAllocationCallbacks *pAllocator,
9977                                                  VkSwapchainKHR *pSwapchain) {
9978    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9979    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
9980
9981    if (VK_SUCCESS == result) {
9982        std::lock_guard<std::mutex> lock(global_lock);
9983        dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
9984    }
9985
9986    return result;
9987}
9988
9989VKAPI_ATTR void VKAPI_CALL
9990DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
9991    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9992    bool skipCall = false;
9993
9994    std::unique_lock<std::mutex> lock(global_lock);
9995    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
9996    if (swapchain_data) {
        if (!swapchain_data->images.empty()) {
9998            for (auto swapchain_image : swapchain_data->images) {
9999                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10000                if (image_sub != dev_data->imageSubresourceMap.end()) {
10001                    for (auto imgsubpair : image_sub->second) {
10002                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10003                        if (image_item != dev_data->imageLayoutMap.end()) {
10004                            dev_data->imageLayoutMap.erase(image_item);
10005                        }
10006                    }
10007                    dev_data->imageSubresourceMap.erase(image_sub);
10008                }
                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
10011                dev_data->imageMap.erase(swapchain_image);
10012            }
10013        }
10014        dev_data->device_extensions.swapchainMap.erase(swapchain);
10015    }
10016    lock.unlock();
10017    if (!skipCall)
10018        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10019}
10020
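// Swapchain images are not created through vkCreateImage, so IMAGE_NODE entries
// and initial VK_IMAGE_LAYOUT_UNDEFINED layout tracking are synthesized here from
// the swapchain's create info for every image the ICD returns.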
10021VKAPI_ATTR VkResult VKAPI_CALL
10022GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10023    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10024    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10025
10026    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10027        // This should never happen and is checked by param checker.
10028        if (!pCount)
10029            return result;
10030        std::lock_guard<std::mutex> lock(global_lock);
10031        const size_t count = *pCount;
10032        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
10033        if (swapchain_node && !swapchain_node->images.empty()) {
10034            // TODO : Not sure I like the memcmp here, but it works
10035            const bool mismatch = (swapchain_node->images.size() != count ||
10036                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10037            if (mismatch) {
10038                // TODO: Verify against Valid Usage section of extension
10039                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10040                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64
                        ") returned mismatching data",
10043                        (uint64_t)(swapchain));
10044            }
10045        }
        if (!swapchain_node)
            return result; // cannot record image state for an unknown swapchain
        for (uint32_t i = 0; i < *pCount; ++i) {
10047            IMAGE_LAYOUT_NODE image_layout_node;
10048            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10049            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10050            // Add imageMap entries for each swapchain image
            VkImageCreateInfo image_ci = {};
            image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
            image_ci.imageType = VK_IMAGE_TYPE_2D; // swapchain images are always 2D
            image_ci.mipLevels = 1;
            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            image_ci.usage = swapchain_node->createInfo.imageUsage;
            image_ci.format = swapchain_node->createInfo.imageFormat;
            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
            image_ci.extent.depth = 1;
            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
10060            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(&image_ci));
10061            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
10062            image_node->valid = false;
10063            image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10064            swapchain_node->images.push_back(pSwapchainImages[i]);
10065            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10066            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10067            dev_data->imageLayoutMap[subpair] = image_layout_node;
10068            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10069        }
10070    }
10071    return result;
10072}
10073
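// QueuePresentKHR checks that each wait semaphore has a signal to consume, that
// every presented image is backed by valid memory, and that the image is in
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. An application typically gets the image into
// that layout with a barrier before ending its command buffer, e.g. (sketch,
// hypothetical handles):
//
//     VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = swapchainImage;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr,
//                          0, nullptr, 1, &barrier);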
10074VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10075    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10076    bool skip_call = false;
10077
10078    std::lock_guard<std::mutex> lock(global_lock);
10079    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10080        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10081        if (pSemaphore && !pSemaphore->signaled) {
            skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                            reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]), __LINE__,
                            DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
10087        }
10088    }
    VkDeviceMemory mem = VK_NULL_HANDLE;
10090    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10091        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10092        if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
10093            VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
10094#if MTMERGESOURCE
10095            skip_call |=
10096                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10097            skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10098#endif
10099            vector<VkImageLayout> layouts;
10100            if (FindLayouts(dev_data, image, layouts)) {
10101                for (auto layout : layouts) {
10102                    if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10103                        skip_call |=
10104                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10105                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s",
10108                                        string_VkImageLayout(layout));
10109                    }
10110                }
10111            }
10112        }
10113    }
10114
10115    if (skip_call) {
10116        return VK_ERROR_VALIDATION_FAILED_EXT;
10117    }
10118
10119    VkResult result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10120
10121    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // If the call reached the ICD, the semaphore waits were consumed even when
        // presentation itself failed, so mark them unsignaled. (TODO: confirm this
        // against the spec.)
10124        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10125            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10126            if (pSemaphore && pSemaphore->signaled) {
10127                pSemaphore->signaled = false;
10128            }
10129        }
10130    }
10131
10132    return result;
10133}
10134
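// vkAcquireNextImageKHR requires an unsignaled semaphore and a fence that is not
// in flight; a successful acquire (VK_SUCCESS or VK_SUBOPTIMAL_KHR) counts as a
// signal operation on the semaphore and puts the fence in flight.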
10135VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10136                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10137    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10138    bool skipCall = false;
10139
10140    std::unique_lock<std::mutex> lock(global_lock);
10141    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10142    if (pSemaphore && pSemaphore->signaled) {
10143        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10144                           reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10145                           "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10146    }
10147
10148    auto pFence = getFenceNode(dev_data, fence);
10149    if (pFence) {
10150        skipCall |= ValidateFenceForSubmit(dev_data, pFence);
10151    }
10152    lock.unlock();
10153
10154    if (skipCall)
10155        return VK_ERROR_VALIDATION_FAILED_EXT;
10156
10157    VkResult result =
10158            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10159
10160    lock.lock();
10161    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
10162        if (pFence) {
10163            pFence->state = FENCE_INFLIGHT;
10164        }
10165
10166        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
10167        if (pSemaphore) {
10168            pSemaphore->signaled = true;
10169        }
10170    }
10171    lock.unlock();
10172
10173    return result;
10174}
10175
10176VKAPI_ATTR VkResult VKAPI_CALL
10177CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10178                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10179    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10180    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10181    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10182    if (VK_SUCCESS == res) {
10183        std::lock_guard<std::mutex> lock(global_lock);
10184        res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
10185    }
10186    return res;
10187}
10188
10189VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
10190                                                         VkDebugReportCallbackEXT msgCallback,
10191                                                         const VkAllocationCallbacks *pAllocator) {
10192    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10193    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10194    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10195    std::lock_guard<std::mutex> lock(global_lock);
10196    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10197}
10198
10199VKAPI_ATTR void VKAPI_CALL
10200DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10201                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10202    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10203    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10204                                                            pMsg);
10205}
10206
10207VKAPI_ATTR VkResult VKAPI_CALL
10208EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10209    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10210}
10211
10212VKAPI_ATTR VkResult VKAPI_CALL
10213EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
10214    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10215}
10216
10217VKAPI_ATTR VkResult VKAPI_CALL
10218EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
10219    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
10220        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
10221
10222    return VK_ERROR_LAYER_NOT_PRESENT;
10223}
10224
10225VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
10226                                                                  const char *pLayerName, uint32_t *pCount,
10227                                                                  VkExtensionProperties *pProperties) {
10228    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
10229        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
10230
10231    assert(physicalDevice);
10232
10233    dispatch_key key = get_dispatch_key(physicalDevice);
10234    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
10235    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
10236}
10237
10238static PFN_vkVoidFunction
10239intercept_core_instance_command(const char *name);
10240
10241static PFN_vkVoidFunction
10242intercept_core_device_command(const char *name);
10243
10244static PFN_vkVoidFunction
10245intercept_khr_swapchain_command(const char *name, VkDevice dev);
10246
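// Device-level dispatch: core commands are looked up first, then the WSI swapchain
// commands (only when VK_KHR_swapchain was enabled on the device), and anything
// the layer does not intercept is forwarded to the next GetDeviceProcAddr in the
// chain.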
10247VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
10248    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
10249    if (proc)
10250        return proc;
10251
10252    assert(dev);
10253
10254    proc = intercept_khr_swapchain_command(funcName, dev);
10255    if (proc)
10256        return proc;
10257
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
10267}
10268
10269VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
10270    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
10271    if (!proc)
10272        proc = intercept_core_device_command(funcName);
10273    if (!proc)
10274        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
10275    if (proc)
10276        return proc;
10277
10278    assert(instance);
10279
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10282    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
10283    if (proc)
10284        return proc;
10285
10286    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10287    if (pTable->GetInstanceProcAddr == NULL)
10288        return NULL;
10289    return pTable->GetInstanceProcAddr(instance, funcName);
10290}
10291
10292static PFN_vkVoidFunction
10293intercept_core_instance_command(const char *name) {
10294    static const struct {
10295        const char *name;
10296        PFN_vkVoidFunction proc;
10297    } core_instance_commands[] = {
10298        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
10299        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
10300        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
10301        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
10302        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
10303        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
10304        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
10305        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
10306        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
10307    };
10308
10309    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
10310        if (!strcmp(core_instance_commands[i].name, name))
10311            return core_instance_commands[i].proc;
10312    }
10313
10314    return nullptr;
10315}
10316
10317static PFN_vkVoidFunction
10318intercept_core_device_command(const char *name) {
10319    static const struct {
10320        const char *name;
10321        PFN_vkVoidFunction proc;
10322    } core_device_commands[] = {
10323        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
10324        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
10325        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
10326        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
10327        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
10328        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
10329        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
10330        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
10331        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
10332        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
10333        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
10334        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
10335        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
10336        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
10337        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
10338        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
10339        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
10340        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
10341        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
10342        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
10343        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
10344        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
10345        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
10346        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
10347        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
10348        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
10349        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
10350        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
10351        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
10352        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
10353        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
10354        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
10355        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
10356        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
10357        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
10358        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
10359        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
10360        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
10361        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
10362        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
10363        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
10364        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
10365        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
10366        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
10367        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
10368        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
10369        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
10370        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
10371        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
10372        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
10373        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
10374        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
10375        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
10376        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
10377        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
10378        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
10379        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
10380        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
10381        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
10382        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
10383        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
10384        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
10385        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
10386        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
10387        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
10388        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
10389        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
10390        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
10391        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
10392        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
10393        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
10394        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
10395        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
10396        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
10397        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
10398        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
10399        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
10400        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
10401        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
10402        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
10403        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
10404        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
10405        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
10406        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
10407        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
10408        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
10409        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
10410        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
10411        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
10412        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
10413        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
10414        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
10415        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
10416        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
10417        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
10418        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
10419        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
10420        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
10421        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
10422        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
10423        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
10424        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
10425        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
10426        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
10427        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
10428        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
10429        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
10430        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
10431        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
10432        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
10433        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
10434        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
10435        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
10436        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
10437        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
10438        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
10439    };
10440
10441    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
10442        if (!strcmp(core_device_commands[i].name, name))
10443            return core_device_commands[i].proc;
10444    }
10445
10446    return nullptr;
10447}
10448
10449static PFN_vkVoidFunction
10450intercept_khr_swapchain_command(const char *name, VkDevice dev) {
10451    static const struct {
10452        const char *name;
10453        PFN_vkVoidFunction proc;
10454    } khr_swapchain_commands[] = {
10455        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
10456        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
10457        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
10458        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
10459        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
10460    };
10461
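    // A null dev means the lookup came through vkGetInstanceProcAddr; without a
    // device there is no way to check whether the WSI extension was enabled, so
    // the name match below proceeds unconditionally.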
10462    if (dev) {
10463        layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10464        if (!dev_data->device_extensions.wsi_enabled)
10465            return nullptr;
10466    }
10467
10468    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
10469        if (!strcmp(khr_swapchain_commands[i].name, name))
10470            return khr_swapchain_commands[i].proc;
10471    }
10472
10473    return nullptr;
10474}
10475
10476} // namespace core_validation
10477
10478// vk_layer_logging.h expects these to be defined
10479
10480VKAPI_ATTR VkResult VKAPI_CALL
10481vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10482                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10483    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10484}
10485
10486VKAPI_ATTR void VKAPI_CALL
10487vkDestroyDebugReportCallbackEXT(VkInstance instance,
10488                                VkDebugReportCallbackEXT msgCallback,
10489                                const VkAllocationCallbacks *pAllocator) {
10490    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10491}
10492
10493VKAPI_ATTR void VKAPI_CALL
10494vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10495                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10496    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
10497}
10498
10499// loader-layer interface v0, just wrappers since there is only a layer
10500
10501VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10502vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
10503    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
10504}
10505
10506VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10507vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10508    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
10509}
10510
10511VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10512vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
10513    // the layer command handles VK_NULL_HANDLE just fine internally
10514    assert(physicalDevice == VK_NULL_HANDLE);
10515    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
10516}
10517
10518VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
10519                                                                                    const char *pLayerName, uint32_t *pCount,
10520                                                                                    VkExtensionProperties *pProperties) {
10521    // the layer command handles VK_NULL_HANDLE just fine internally
10522    assert(physicalDevice == VK_NULL_HANDLE);
10523    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
10524}
10525
10526VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10527    return core_validation::GetDeviceProcAddr(dev, funcName);
10528}
10529
10530VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10531    return core_validation::GetInstanceProcAddr(instance, funcName);
10532}
10533