core_validation.cpp revision e7f1fab24ce4b59bce87246a510b7e52638f83d0
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>

48#include "vk_loader_platform.h"
49#include "vk_dispatch_table_helper.h"
50#include "vk_struct_string_helper_cpp.h"
51#if defined(__GNUC__)
52#pragma GCC diagnostic ignored "-Wwrite-strings"
53#endif
54#if defined(__GNUC__)
55#pragma GCC diagnostic warning "-Wwrite-strings"
56#endif
57#include "vk_struct_size_helper.h"
58#include "core_validation.h"
59#include "vk_layer_table.h"
60#include "vk_layer_data.h"
61#include "vk_layer_extension_utils.h"
62#include "vk_layer_utils.h"
63#include "spirv-tools/libspirv.h"
64
65#if defined __ANDROID__
66#include <android/log.h>
67#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
68#else
69#define LOGCONSOLE(...)                                                                                                            \
70    {                                                                                                                              \
71        printf(__VA_ARGS__);                                                                                                       \
72        printf("\n");                                                                                                              \
73    }
74#endif

using namespace std;

// TODO : CB really needs its own class and files so this is just temp code until that happens
GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        // Make sure that no sets hold onto deleted CB binding
        for (auto set : lastBound[i].uniqueBoundSets) {
            set->RemoveBoundCommandBuffer(this);
        }
    }
}

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;
    unique_ptr<INSTANCE_STATE> instance_state;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // All queues under given device
    // Vector indices correspond to queueFamilyIndex
    vector<unique_ptr<VkQueueFamilyProperties>> queue_family_properties;
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;
    VkPhysicalDeviceFeatures physical_device_features;
    unique_ptr<PHYSICAL_DEVICE_STATE> physical_device_state;

    layer_data()
        : instance_state(nullptr), report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
          device_extensions(), device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{}, physical_device_features{},
          physical_device_state(nullptr) {}
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}
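
// Illustrative example (hypothetical layer list, not from the spec): with
//     ppEnabledLayerNames = { "VK_LAYER_GOOGLE_unique_objects", "VK_LAYER_LUNARG_core_validation" }
// unique_objects precedes this layer, so the console warning above fires on the first
// iteration; listing VK_LAYER_LUNARG_core_validation first silences it.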

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over SPIR-V instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIR-V module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
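
// Example (illustrative): each SPIR-V instruction packs its word count into the high 16 bits
// of its first word and its opcode into the low 16 bits. For the instruction word 0x0004003B,
// len() == 4 and opcode() == 0x3B (spv::OpVariable).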

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {
        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
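
// A minimal usage sketch (illustrative; "module" is an assumed, already-populated shader_module):
//
//     for (auto insn : module) {                        // range-based for via begin()/end()
//         if (insn.opcode() == spv::OpVariable) {
//             auto type = module.get_def(insn.word(1)); // jump to the definition of the result type
//             assert(type != module.end());
//         }
//     }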

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return ImageViewCreateInfo ptr for specified imageView or else NULL
VkImageViewCreateInfo *getImageViewData(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else VK_NULL_HANDLE
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return BufferViewCreateInfo ptr for specified bufferView or else NULL
VkBufferViewCreateInfo *getBufferViewInfo(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_NODE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        if (img_node)
            return &img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node)
            return &buff_node->mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                            MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                                " used by %s. In this case, %s should have %s set during creation.",
                            ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skip_call;
}

// Helper function to validate usage flags for images
// For given image_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_NODE const *image_node, VkFlags desired, VkBool32 strict,
                                    char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
                                     char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                "buffer", func_name, usage_string);
}
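
// Illustrative call site (assumed names; a real caller passes its own nodes and strings):
//     skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
//                                           "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
// With strict == true every desired bit must be present; with strict == false any overlap suffices.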

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}
// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
                           reinterpret_cast<uint64_t &>(mem));
        }
    }
    return false;
}
// For given image_node
//  If mem is special swapchain key, then verify that image_node valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_NODE *image_node, const char *functionName) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_node->mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_node->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), functionName);
    }
    return false;
}
// For given buffer_node, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_node to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_NODE *image_node, bool valid) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_node->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
    SetMemoryValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
}
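
// Illustrative lifecycle (assumed names, not a specific call site): a transfer write marks the
// destination range valid, and a later read validates the source range first, e.g.:
//     SetBufferMemoryValid(dev_data, dst_buff_node, true);   // after a command that writes the buffer
//     skip_call |= ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");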

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given image node and command buffer node
static bool addCommandBufferBindingImage(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_NODE *img_node, const char *apiName) {
    bool skip_call = false;
    // Skip validation if this image was created through WSI
    if (img_node->mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, img_node->mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(img_node->mem);
        }
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
    }
    // Now update cb binding for image
    img_node->cb_bindings.insert(cb_node);
    return skip_call;
}

// Create binding link between given buffer node and command buffer node
static bool addCommandBufferBindingBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node,
                                          const char *apiName) {
    bool skip_call = false;

    // First update CB binding in MemObj mini CB list
    DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->mem);
    if (pMemInfo) {
        pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
        // Now update CBInfo's Mem reference list
        cb_node->memObjs.insert(buff_node->mem);
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    }
    // Now update cb binding for buffer
    buff_node->cb_bindings.insert(cb_node);

    return skip_call;
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->command_buffer_bindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skip_call = false;
    size_t cmdBufRefCount = pMemObjInfo->command_buffer_bindings.size();
    size_t objRefCount = pMemObjInfo->obj_bindings.size();

    if ((cmdBufRefCount > 0) || (objRefCount > 0)) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                            "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                            " references",
                            (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->command_buffer_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->command_buffer_bindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->obj_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->obj_bindings.clear();
    }
    return skip_call;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skip_call = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        // TODO: Verify against Valid Use section
        // Clear any CB bindings for completed CBs
        //   TODO : Is there a better place to do this?

        assert(pInfo->object != VK_NULL_HANDLE);
        // clear_cmd_buf_and_mem_references removes elements from
        // pInfo->command_buffer_bindings -- this copy not needed in c++14,
        // and probably not needed in practice in c++11
        auto bindings = pInfo->command_buffer_bindings;
        for (auto cb : bindings) {
            if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                clear_cmd_buf_and_mem_references(dev_data, cb);
            }
        }

        // Now verify that no references to this mem obj remain and remove bindings
        if (pInfo->command_buffer_bindings.size() || pInfo->obj_bindings.size()) {
            skip_call |= reportMemReferencesAndCleanUp(dev_data, pInfo);
        }
        // Delete mem obj info
        dev_data->memObjMap.erase(dev_data->memObjMap.find(mem));
    } else if (VK_NULL_HANDLE != mem) {
        // The request is to free an invalid, non-zero handle
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                            "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map",
                            reinterpret_cast<uint64_t &>(mem));
    }
    return skip_call;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to VK_NULL_HANDLE
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skip_call = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->obj_bindings.erase({handle, type})) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skip_call;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_NODE *image_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_node->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        if (0 == image_node->mem) {
            result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                             reinterpret_cast<const uint64_t &>(image_node->image), __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                             "%s: VkImage object 0x%" PRIxLEAST64 " used without first calling vkBindImageMemory.", api_name,
                             reinterpret_cast<const uint64_t &>(image_node->image));
        }
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        if (0 == buffer_node->mem) {
            result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             reinterpret_cast<const uint64_t &>(buffer_node->buffer), __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                             "%s: VkBuffer object 0x%" PRIxLEAST64 " used without first calling vkBindBufferMemory.", api_name,
                             reinterpret_cast<const uint64_t &>(buffer_node->buffer));
        }
    }
    return result;
}

// For NULL mem case, output an error
// Make sure given object is in global object map
//  If a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately: binding an object to NULL memory is reported as an error
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                     "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                     apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO : More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skip_call;
}

// For NULL mem case, clear any previous binding
// Otherwise, make sure given object is in its object map,
//  add the {handle, type} reference to the memory object's binding list,
//  and set the object's memory binding to mem
// Returns true if the call should be skipped due to a validation error
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding
    if (mem == VK_NULL_HANDLE) {
        skip_call = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skip_call;
}

// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skip_call = false;
    *mem = VK_NULL_HANDLE;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        // Guard against a stale handle with no tracked node before dereferencing
        auto img_node = getImageNode(dev_data, VkImage(handle));
        if (img_node)
            *mem = img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(dev_data, VkBuffer(handle));
        if (buff_node)
            *mem = buff_node->mem;
        break;
    }
    default:
        assert(0);
    }
    if (!*mem) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
                                   " but binding is NULL. Has memory been bound to this object?",
                            object_type_to_string(type), handle);
    }
    return skip_call;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->command_buffer_bindings.size() + mem_info->obj_bindings.size());
        if (0 != mem_info->alloc_info.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->obj_bindings.size());
        if (mem_info->obj_bindings.size() > 0) {
            for (auto obj : mem_info->obj_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->command_buffer_bindings.size());
        if (mem_info->command_buffer_bindings.size() > 0) {
            for (auto cb : mem_info->command_buffer_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIR-V utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}
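
// Example (illustrative): for "%int_4 = OpConstant %int 4", word(1) is the result type id,
// word(2) the result id, and word(3) the 32-bit literal, so
// get_constant_value(src, <id of %int_4>) == 4.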

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
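
// Example output (illustrative): for the SPIR-V declarations
//     %float   = OpTypeFloat 32
//     %v4float = OpTypeVector %float 4
//     %ptr     = OpTypePointer Output %v4float
// describe_type(src, <id of %ptr>) yields "ptr to output vec4 of float32".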

static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}
1221
1222
1223static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
1224    /* walk two type trees together, and complain about differences */
1225    auto a_insn = a->get_def(a_type);
1226    auto b_insn = b->get_def(b_type);
1227    assert(a_insn != a->end());
1228    assert(b_insn != b->end());
1229
1230    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
1231        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
1232    }
1233
1234    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1235        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1236        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1237    }
1238
1239    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1240        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
1241    }
1242
1243    if (a_insn.opcode() != b_insn.opcode()) {
1244        return false;
1245    }
1246
1247    if (a_insn.opcode() == spv::OpTypePointer) {
1248        /* match on pointee type. storage class is expected to differ */
1249        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
1250    }
1251
1252    if (a_arrayed || b_arrayed) {
1253        /* if we havent resolved array-of-verts by here, we're not going to. */
1254        return false;
1255    }
1256
1257    switch (a_insn.opcode()) {
1258    case spv::OpTypeBool:
1259        return true;
1260    case spv::OpTypeInt:
1261        /* match on width, signedness */
1262        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
1263    case spv::OpTypeFloat:
1264        /* match on width */
1265        return a_insn.word(2) == b_insn.word(2);
1266    case spv::OpTypeVector:
1267        /* match on element type, count. */
1268        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1269            return false;
1270        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
1271            return a_insn.word(3) >= b_insn.word(3);
1272        }
1273        else {
1274            return a_insn.word(3) == b_insn.word(3);
1275        }
1276    case spv::OpTypeMatrix:
1277        /* match on element type, count. */
1278        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
1279    case spv::OpTypeArray:
1280        /* match on element type, count. these all have the same layout. we don't get here if
1281         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1282         * not a literal within OpTypeArray */
1283        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
1284               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1285    case spv::OpTypeStruct:
1286        /* match on all element types */
1287        {
1288            if (a_insn.len() != b_insn.len()) {
1289                return false; /* structs cannot match if member counts differ */
1290            }
1291
1292            for (unsigned i = 2; i < a_insn.len(); i++) {
1293                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
1294                    return false;
1295                }
1296            }
1297
1298            return true;
1299        }
1300    default:
1301        /* remaining types are CLisms, or may not appear in the interfaces we
1302         * are interested in. Just claim no match.
1303         */
1304        return false;
1305    }
1306}
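
/* Illustrative sketch (hypothetical types, not from the original source): with relaxed == true,
 * as used by the cross-stage interface check below, a producer writing a vec4 of float32 matches
 * a consumer reading a vec2 of float32 (extra trailing components are permitted), and a producer
 * vec4 also matches a plain float32 consumer via the narrow-numeric-type path above. With
 * relaxed == false, component counts must match exactly. */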

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
        /* everything else is just 1. */
        return 1;
    }
}
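
/* Worked example (assumed values, for illustration only): a vec4 of float32 consumes
 * (32 * 4 + 127) / 128 = 1 location; a 3-component vector of 64-bit floats consumes
 * (64 * 3 + 127) / 128 = 2 locations; a mat4 of float32 columns consumes 4 * 1 = 4 locations. */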

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}
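
/* For example (illustrative): a vertex attribute using VK_FORMAT_R64G64B64_SFLOAT declared at
 * location 3 also occupies location 4, matching the two locations a 64-bit 3-vector consumes
 * on the shader side. */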

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
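
/* Illustrative sketch (hypothetical GLSL-level block, not from the original source): for
 *     out Block { layout(location = 6, component = 2) float f; } blk;
 * the first pass records component 2 for member 0, and the second pass emits one interface_var
 * keyed (6, 2) with is_block_member = true. */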

static void collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
                                          bool is_array_of_verts) {
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;

    for (auto insn : *src) {
        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
       terminator, to fill out the rest of the word - so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
            bool is_patch = var_patch.find(id) != var_patch.end();

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupies multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = false;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }
}
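
/* Illustrative sketch (hypothetical decorations): given
 *     OpDecorate %a Location 2    ; %a a vec4 of float32 -> one entry keyed (2, 0)
 *     OpDecorate %b Location 4    ; %b a 3-vector of 64-bit floats -> entries (4, 0) and (5, 0)
 * variables decorated only with a BuiltIn produce no location-keyed entries, and block
 * instances are delegated to collect_interface_block_members() above. */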

static void collect_interface_by_input_attachment_index(debug_report_data *report_data, shader_module const *src,
                                                        std::unordered_set<uint32_t> const &accessible_ids,
                                                        std::vector<std::pair<uint32_t, interface_var>> &out) {

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
                auto attachment_index = insn.word(3);
                auto id = insn.word(1);

                if (accessible_ids.count(id)) {
                    auto def = src->get_def(id);
                    assert(def != src->end());

                    /* check the *variable's* storage class (word 3 of the OpVariable), not word 3
                     * of the OpDecorate instruction, which is the attachment index literal */
                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
                        for (unsigned int offset = 0; offset < num_locations; offset++) {
                            interface_var v;
                            v.id = id;
                            v.type_id = def.word(1);
                            v.offset = offset;
                            v.is_patch = false;
                            v.is_block_member = false;
                            out.emplace_back(attachment_index + offset, v);
                        }
                    }
                }
            }
        }
    }
}

static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src,
                                                 std::unordered_set<uint32_t> const &accessible_ids,
                                                 std::vector<std::pair<descriptor_slot_t, interface_var>> &out) {

    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
         * DecorationDescriptorSet and DecorationBinding.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            interface_var v;
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            v.offset = 0;
            v.is_patch = false;
            v.is_block_member = false;
            out.emplace_back(std::make_pair(set, binding), v);
        }
    }
}
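
/* For example (GLSL-level, illustrative): a declaration such as
 *     layout(set = 1, binding = 3) uniform sampler2D s;
 * yields OpDecorate DescriptorSet 1 and OpDecorate Binding 3 on the variable, so it is reported
 * here as descriptor slot (1, 3). An undecorated set or binding defaults to 0 via
 * value_or_default(). */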

static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              shader_stage_attributes const *consumer_stage) {
    std::map<location_t, interface_var> outputs;
    std::map<location_t, interface_var> inputs;

    bool pass = true;

    collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
    collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
                        a_first.second, consumer_stage->name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
                        producer_stage->name)) {
                pass = false;
            }
            b_it++;
        } else {
            // subtleties of arrayed interfaces:
            // - if is_patch, then the member is not arrayed, even though the interface may be.
            // - if is_block_member, then the extra array level of an arrayed interface is not
            //   expressed in the member type -- it's expressed in the block type.
            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
                             true)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            if (a_it->second.is_patch != b_it->second.is_patch) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
                            "per-%s in %s stage", a_first.first, a_first.second,
                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}
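
/* Illustrative outcome (hypothetical stages): if a vertex shader writes location 4 that the
 * fragment shader never reads, the first branch raises the OUTPUT_NOT_CONSUMED performance
 * warning; if the fragment shader reads location 5 that the vertex shader never writes, the
 * second branch raises the INPUT_NOT_PRODUCED error. Locations present on both sides fall
 * through to the relaxed types_match() and patch-decoration checks. */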

enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64G64_SINT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_R64_UINT:
    case VK_FORMAT_R64G64_UINT:
    case VK_FORMAT_R64G64B64_UINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}

/* characterizes a SPIR-V type appearing in an interface to a fixed-function (FF) stage,
 * for comparison to a VkFormat's characterization above. */
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeInt:
        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
    case spv::OpTypeFloat:
        return FORMAT_TYPE_FLOAT;
    case spv::OpTypeVector:
    case spv::OpTypeMatrix:
    case spv::OpTypeArray:
    case spv::OpTypeImage:
        /* these all recurse into their element/sampled type, held in word 2 */
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypePointer:
        return get_fundamental_type(src, insn.word(3));
    default:
        return FORMAT_TYPE_UNDEFINED;
    }
}

static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}

static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
     * each binding should be specified only once.
     */
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}
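
/* For example (illustrative): two entries in pVertexBindingDescriptions both naming binding 0 --
 * even with identical stride and inputRate -- trigger the SHADER_CHECKER_INCONSISTENT_VI error
 * above, since each binding may be described only once. */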

static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    std::map<location_t, interface_var> inputs;
    bool pass = true;

    collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false);

    /* Build index by location */
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
            for (auto j = 0u; j < num_locations; j++) {
                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
            }
        }
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();
    bool used = false;

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "Vertex attribute at location %d not consumed by VS", a_first)) {
                pass = false;
            }
            used = false;
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
                        b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            /* type checking */
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first,
                            describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            used = true;
            it_b++;
        }
    }

    return pass;
}
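
/* Illustrative outcome (hypothetical pipeline): an attribute at location 0 with format
 * VK_FORMAT_R32G32B32A32_SFLOAT (FORMAT_TYPE_FLOAT) feeding a VS input declared as ivec4
 * (FORMAT_TYPE_SINT) raises the INTERFACE_TYPE_MISMATCH error; an attribute at a location the
 * shader never reads only warns (performance), while a shader input with no attribute errors. */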

static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
                                                    uint32_t subpass_index) {
    std::map<location_t, interface_var> outputs;
    std::map<uint32_t, VkFormat> color_attachments;
    auto subpass = rpci->pSubpasses[subpass_index];
    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
        uint32_t attachment = subpass.pColorAttachments[i].attachment;
        if (attachment == VK_ATTACHMENT_UNUSED)
            continue;
        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
            color_attachments[i] = rpci->pAttachments[attachment].format;
        }
    }

    bool pass = true;

    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */

    collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false);

    auto it_a = outputs.begin();
    auto it_b = color_attachments.begin();

    /* Walk attachment list and outputs together */

    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();

        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
            unsigned att_type = get_format_type(it_b->second);

            /* type checking */
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
                            string_VkFormat(it_b->second),
                            describe_type(fs, it_a->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it_a++;
            it_b++;
        }
    }

    return pass;
}

/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
 * for example.
 * Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
 *  - NOT the shader input/output interfaces.
 *
 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
 * converting parts of this to be generated from the machine-readable spec instead.
 */
static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
    std::unordered_set<uint32_t> worklist;
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
             * across all kinds of things here that we may not care about. */
            continue;
        }

        /* try to add to the output set */
        if (!ids.insert(id).second) {
            continue; /* if we already saw this id, we don't want to walk it again. */
        }

        switch (insn.opcode()) {
        case spv::OpFunction:
            /* scan whole body of the function, enlisting anything interesting */
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); /* ptr */
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); /* ptr */
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); /* base ptr */
                    break;
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); /* image or sampled image */
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
                    break;
                case spv::OpFunctionCall:
                    for (uint32_t i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* fn itself, and all args */
                    }
                    break;

                case spv::OpExtInst:
                    for (uint32_t i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* operands to ext inst */
                    }
                    break;
                }
            }
            break;
        }
    }
}
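
/* Illustrative sketch (hypothetical module): the walk is seeded with the entrypoint's function
 * id (entrypoint.word(2)). If main() calls helper(), OpFunctionCall enlists helper's id, so a
 * sampler loaded only inside helper() still lands in `ids`; a variable referenced only by a
 * function nothing ever calls is never reached and is correctly excluded. */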

static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    /* strip off ptrs etc */
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    /* validate directly off the offsets. this isn't quite correct for arrays
     * and matrices, but is a good first step. TODO: arrays, matrices, weird
     * sizes */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; /* bytes; TODO: calculate this based on the type */

                bool found_range = false;
                for (auto const &range : *push_constant_ranges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}
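
/* Worked example (assumed values): a push constant member decorated Offset 16, with the assumed
 * 4-byte size above, needs some range satisfying offset <= 16 and offset + size >= 20. A layout
 * declaring { offset = 0, size = 16 } fails the coverage test and produces
 * PUSH_CONSTANT_OUT_OF_RANGE; one declaring { offset = 0, size = 32 } passes, provided its
 * stageFlags include the stage being validated. */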

static bool validate_push_constant_usage(debug_report_data *report_data,
                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
                                         std::unordered_set<uint32_t> const &accessible_ids, VkShaderStageFlagBits stage) {
    /* take the id set by const reference -- it can be large, and is not modified here */
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
                                                                  src->get_def(def_insn.word(1)), stage);
        }
    }

    return pass;
}

// For given pipelineLayout verify that the set_layout_node at slot.first
//  has the requested binding at slot.second and return ptr to that binding
static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
    if (!pipelineLayout)
        return nullptr;

    if (slot.first >= pipelineLayout->set_layouts.size())
        return nullptr;

    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
}

// Block of code at start here for managing/tracking Pipeline state that this layer cares about

static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};

// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
//   to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread

static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
        if (pCB->drawCount[i])
            return true;
    }
    return false;
}

// Check object status for selected flag state
static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
    if (!(pNode->status & status_mask)) {
        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
    auto it = my_data->pipelineMap.find(pipeline);
    if (it == my_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
    auto it = my_data->renderPassMap.find(renderpass);
    if (it == my_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second;
}

static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) {
    auto it = my_data->frameBufferMap.find(framebuffer);
    if (it == my_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == my_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
    if (it == my_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
                return true;
        }
    }
    return false;
}

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
    bool result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
                                  "Dynamic viewport state not set for this command buffer");
    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
                              "Dynamic scissor state not set for this command buffer");
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
    }
    if (indexedDraw) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
    }
    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
            return true;
    } else { // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
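
/* For example (illustrative): comparing index 2 when the primary subpass declares three color
 * references and the secondary declares only two, the secondary is treated as holding
 * VK_ATTACHMENT_UNUSED at index 2 -- compatible only if the primary's reference there is also
 * VK_ATTACHMENT_UNUSED. When both sides have a real reference, the underlying
 * VkAttachmentDescription format and sample count must both match. */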
2313// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
2314// For given primary RenderPass object and secondry RenderPassCreateInfo, verify that they're compatible
2315static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
2316                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2317    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2318        stringstream errorStr;
2319        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2320                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2321        errorMsg = errorStr.str();
2322        return false;
2323    }
2324    uint32_t spIndex = 0;
2325    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2326        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2327        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2328        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2329        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2330        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2331            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2332                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2333                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2334                stringstream errorStr;
2335                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2336                errorMsg = errorStr.str();
2337                return false;
2338            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2339                                                         primaryColorCount, primaryRPCI->pAttachments,
2340                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2341                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2342                stringstream errorStr;
2343                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2344                errorMsg = errorStr.str();
2345                return false;
2346            }
2347        }
2348
2349        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2350                                              1, primaryRPCI->pAttachments,
2351                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2352                                              1, secondaryRPCI->pAttachments)) {
2353            stringstream errorStr;
2354            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2355            errorMsg = errorStr.str();
2356            return false;
2357        }
2358
2359        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2360        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2361        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2362        for (uint32_t i = 0; i < inputMax; ++i) {
2363            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount,
2364                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2365                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2366                stringstream errorStr;
2367                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2368                errorMsg = errorStr.str();
2369                return false;
2370            }
2371        }
2372    }
2373    return true;
2374}
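
// Illustrative sketch (not called by this layer): per the Vulkan spec, the
// attachment-level part of the compatibility walk above reduces to comparing
// format and sample count; load/store ops and image layouts do not affect
// render pass compatibility. A minimal condensed form, assuming the attachment
// indices have already been bounds-checked:
static inline bool attachment_descs_compatible(VkAttachmentDescription const &a, VkAttachmentDescription const &b) {
    return (a.format == b.format) && (a.samples == b.samples);
}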
2375
2376// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2377// pipelineLayout[layoutIndex]
2378static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2379                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2380                                            string &errorMsg) {
2381    auto num_sets = pipeline_layout->set_layouts.size();
2382    if (layoutIndex >= num_sets) {
2383        stringstream errorStr;
2384        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2385                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but an attempt was made to bind a set at index "
2386                 << layoutIndex;
2387        errorMsg = errorStr.str();
2388        return false;
2389    }
2390    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2391    return pSet->IsCompatible(layout_node, &errorMsg);
2392}
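
// Note: per the Vulkan spec, two descriptor set layouts are compatible when they are
// identically defined -- the same bindings with matching descriptorType, descriptorCount,
// stageFlags, and immutable samplers -- even if they were created as distinct objects.
// IsCompatible() above reports any such difference through errorMsg.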
2393
2394// Validate that data for each specialization entry is fully contained within the buffer.
2395static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2396    bool pass = true;
2397
2398    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2399
2400    if (spec) {
2401        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2402            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2403                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2404                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2405                            "Specialization entry %u (for constant id %u) references memory outside provided "
2406                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2407                            " bytes provided)",
2408                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2409                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2410
2411                    pass = false;
2412                }
2413            }
2414        }
2415    }
2416
2417    return pass;
2418}
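
// Example (illustrative, application-side): a specialization block that passes the
// check above. Both map entries lie fully within the 8 bytes of data provided.
static inline VkSpecializationInfo example_specialization_info() {
    static const uint32_t payload[2] = {16, 1};                // 8 bytes of specialization data
    static const VkSpecializationMapEntry entries[2] = {
        {0 /*constantID*/, 0 /*offset*/, sizeof(uint32_t)},    // bytes 0..3
        {1 /*constantID*/, 4 /*offset*/, sizeof(uint32_t)},    // bytes 4..7
    };
    VkSpecializationInfo info = {};
    info.mapEntryCount = 2;
    info.pMapEntries = entries;
    info.dataSize = sizeof(payload);
    info.pData = payload;
    return info;
}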
2419
2420static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2421                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2422    auto type = module->get_def(type_id);
2423
2424    descriptor_count = 1;
2425
2426    /* Strip off any arrays or pointers. Each array level stripped multiplies
2427     * the required descriptor count by that array dimension's length. */
2428    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2429        if (type.opcode() == spv::OpTypeArray) {
2430            descriptor_count *= get_constant_value(module, type.word(3));
2431            type = module->get_def(type.word(2));
2432        }
2433        else {
2434            type = module->get_def(type.word(3));
2435        }
2436    }
2437
2438    switch (type.opcode()) {
2439    case spv::OpTypeStruct: {
2440        for (auto insn : *module) {
2441            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2442                if (insn.word(2) == spv::DecorationBlock) {
2443                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2444                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2445                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2446                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2447                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2448                }
2449            }
2450        }
2451
2452        /* Invalid */
2453        return false;
2454    }
2455
2456    case spv::OpTypeSampler:
2457        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2458            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2459
2460    case spv::OpTypeSampledImage:
2461        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2462            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2463             * doesn't really have a sampler, and a texel buffer descriptor
2464             * doesn't really provide one. Allow this slight mismatch.
2465             */
2466            auto image_type = module->get_def(type.word(2));
2467            auto dim = image_type.word(3);
2468            auto sampled = image_type.word(7);
2469            return dim == spv::DimBuffer && sampled == 1;
2470        }
2471        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2472
2473    case spv::OpTypeImage: {
2474        /* Many descriptor types can back an image type; which one is valid depends
2475         * on the image's dimension and whether it will be used with a sampler.
2476         * SPIR-V for Vulkan requires that `sampled` be 1 or 2 -- deferring the
2477         * decision to runtime (sampled == 0) is not allowed.
2478         */
2479        auto dim = type.word(3);
2480        auto sampled = type.word(7);
2481
2482        if (dim == spv::DimSubpassData) {
2483            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2484        } else if (dim == spv::DimBuffer) {
2485            if (sampled == 1) {
2486                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2487            } else {
2488                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2489            }
2490        } else if (sampled == 1) {
2491            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2492                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2493        } else {
2494            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2495        }
2496    }
2497
2498    /* We shouldn't really see any other junk types -- but if we do, they're
2499     * a mismatch.
2500     */
2501    default:
2502        return false; /* Mismatch */
2503    }
2504}
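
// For reference, some common GLSL declarations and the descriptor types the matching
// above accepts for them (assuming typical glslang SPIR-V output):
//   layout(set=0, binding=0) uniform UBO  { ... };      OpTypeStruct + Block        -> UNIFORM_BUFFER[_DYNAMIC]
//   layout(set=0, binding=1) buffer  SSBO { ... };      OpTypeStruct + BufferBlock  -> STORAGE_BUFFER[_DYNAMIC]
//   layout(set=0, binding=2) uniform sampler2D s;       OpTypeSampledImage          -> COMBINED_IMAGE_SAMPLER
//   layout(set=0, binding=3) uniform texture2D t;       OpTypeImage, sampled=1      -> SAMPLED_IMAGE or COMBINED_IMAGE_SAMPLER
//   layout(set=0, binding=4, rgba8) uniform image2D i;  OpTypeImage, sampled=2      -> STORAGE_IMAGE
//   layout(set=0, binding=5) uniform subpassInput a;    OpTypeImage, DimSubpassData -> INPUT_ATTACHMENT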
2505
2506static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2507    if (!feature) {
2508        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2509                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2510                    "Shader requires VkPhysicalDeviceFeatures::%s, but that feature is not "
2511                    "enabled on the device",
2512                    feature_name)) {
2513            return false;
2514        }
2515    }
2516
2517    return true;
2518}
2519
2520static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2521                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2522    bool pass = true;
2523
2524
2525    for (auto insn : *src) {
2526        if (insn.opcode() == spv::OpCapability) {
2527            switch (insn.word(1)) {
2528            case spv::CapabilityMatrix:
2529            case spv::CapabilityShader:
2530            case spv::CapabilityInputAttachment:
2531            case spv::CapabilitySampled1D:
2532            case spv::CapabilityImage1D:
2533            case spv::CapabilitySampledBuffer:
2534            case spv::CapabilityImageBuffer:
2535            case spv::CapabilityImageQuery:
2536            case spv::CapabilityDerivativeControl:
2537                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2538                break;
2539
2540            case spv::CapabilityGeometry:
2541                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2542                break;
2543
2544            case spv::CapabilityTessellation:
2545                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2546                break;
2547
2548            case spv::CapabilityFloat64:
2549                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2550                break;
2551
2552            case spv::CapabilityInt64:
2553                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2554                break;
2555
2556            case spv::CapabilityTessellationPointSize:
2557            case spv::CapabilityGeometryPointSize:
2558                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2559                                        "shaderTessellationAndGeometryPointSize");
2560                break;
2561
2562            case spv::CapabilityImageGatherExtended:
2563                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2564                break;
2565
2566            case spv::CapabilityStorageImageMultisample:
2567                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2568                break;
2569
2570            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2571                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2572                                        "shaderUniformBufferArrayDynamicIndexing");
2573                break;
2574
2575            case spv::CapabilitySampledImageArrayDynamicIndexing:
2576                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2577                                        "shaderSampledImageArrayDynamicIndexing");
2578                break;
2579
2580            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2581                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2582                                        "shaderStorageBufferArrayDynamicIndexing");
2583                break;
2584
2585            case spv::CapabilityStorageImageArrayDynamicIndexing:
2586                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2587                                        "shaderStorageImageArrayDynamicIndexing");
2588                break;
2589
2590            case spv::CapabilityClipDistance:
2591                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2592                break;
2593
2594            case spv::CapabilityCullDistance:
2595                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2596                break;
2597
2598            case spv::CapabilityImageCubeArray:
2599                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2600                break;
2601
2602            case spv::CapabilitySampleRateShading:
2603                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2604                break;
2605
2606            case spv::CapabilitySparseResidency:
2607                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2608                break;
2609
2610            case spv::CapabilityMinLod:
2611                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2612                break;
2613
2614            case spv::CapabilitySampledCubeArray:
2615                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2616                break;
2617
2618            case spv::CapabilityImageMSArray:
2619                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2620                break;
2621
2622            case spv::CapabilityStorageImageExtendedFormats:
2623                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2624                                        "shaderStorageImageExtendedFormats");
2625                break;
2626
2627            case spv::CapabilityInterpolationFunction:
2628                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2629                break;
2630
2631            case spv::CapabilityStorageImageReadWithoutFormat:
2632                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2633                                        "shaderStorageImageReadWithoutFormat");
2634                break;
2635
2636            case spv::CapabilityStorageImageWriteWithoutFormat:
2637                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2638                                        "shaderStorageImageWriteWithoutFormat");
2639                break;
2640
2641            case spv::CapabilityMultiViewport:
2642                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2643                break;
2644
2645            default:
2646                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2647                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2648                            "Shader declares capability %u, which is not supported in Vulkan.",
2649                            insn.word(1)))
2650                    pass = false;
2651                break;
2652            }
2653        }
2654    }
2655
2656    return pass;
2657}
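
// Example (illustrative, application-side): the features consumed above must be
// enabled at device creation. For a shader declaring OpCapability Geometry
// (queue setup and error handling omitted for brevity):
//
//     VkPhysicalDeviceFeatures features = {};
//     features.geometryShader = VK_TRUE;
//     VkDeviceCreateInfo ci = {VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};
//     ci.pEnabledFeatures = &features;
//     vkCreateDevice(gpu, &ci, nullptr, &device);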
2658
2659
2660static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2661    auto type = module->get_def(type_id);
2662
2663    while (true) {
2664        switch (type.opcode()) {
2665        case spv::OpTypeArray:
2666        case spv::OpTypeSampledImage:
2667            type = module->get_def(type.word(2));
2668            break;
2669        case spv::OpTypePointer:
2670            type = module->get_def(type.word(3));
2671            break;
2672        case spv::OpTypeImage: {
2673            auto dim = type.word(3);
2674            auto arrayed = type.word(5);
2675            auto msaa = type.word(6);
2676
2677            switch (dim) {
2678            case spv::Dim1D:
2679                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2680            case spv::Dim2D:
2681                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2682                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2683            case spv::Dim3D:
2684                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2685            case spv::DimCube:
2686                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2687            default:  // subpass, buffer, etc.
2688                return 0;
2689            }
2690        }
2691        default:
2692            return 0;
2693        }
2694    }
2695}
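
// For example, with typical glslang output a `sampler2DArray` yields
// OpTypeSampledImage -> OpTypeImage(Dim2D, arrayed=1, ms=0), so this returns
// DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY; draw-time
// validation then checks those bits against the bound VkImageView, which must
// have been created with VK_IMAGE_VIEW_TYPE_2D_ARRAY.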
2696
2697
2698static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2699                                           VkPipelineShaderStageCreateInfo const *pStage,
2700                                           PIPELINE_NODE *pipeline,
2701                                           shader_module **out_module,
2702                                           spirv_inst_iter *out_entrypoint,
2703                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2704                                           std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
2706    bool pass = true;
2707    auto module_it = shaderModuleMap.find(pStage->module);
2708    auto module = *out_module = module_it->second.get();
2709    pass &= validate_specialization_offsets(report_data, pStage);
2710
2711    /* find the entrypoint */
2712    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2713    if (entrypoint == module->end()) {
2714        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2715                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2716                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2717                    string_VkShaderStageFlagBits(pStage->stage))) {
2718            pass = false;
2719        }
2720    }
2721
2722    /* validate shader capabilities against enabled device features */
2723    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2724
2725    /* mark accessible ids */
2726    std::unordered_set<uint32_t> accessible_ids;
2727    mark_accessible_ids(module, entrypoint, accessible_ids);
2728
2729    /* validate descriptor set layout against what the entrypoint actually uses */
2730    std::vector<std::pair<descriptor_slot_t, interface_var>> descriptor_uses;
2731    collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses);
2732
2733    auto pipelineLayout = pipeline->pipeline_layout;
2734
2735    /* validate push constant usage */
2736    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2737
2738    /* validate descriptor use */
2739    for (auto use : descriptor_uses) {
2740        // While validating shaders capture which slots are used by the pipeline
2741        auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
2742        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2743
2744        /* verify given pipelineLayout has requested setLayout with requested binding */
2745        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2746        unsigned required_descriptor_count;
2747
2748        if (!binding) {
2749            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2750                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2751                        "Shader uses descriptor slot %u.%u (used as type `%s`), but that slot is not declared in the pipeline layout",
2752                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2753                pass = false;
2754            }
2755        } else if (~binding->stageFlags & pStage->stage) {
2756            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2757                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2758                        "Shader uses descriptor slot %u.%u (used "
2759                        "as type `%s`), but the descriptor is not "
2760                        "accessible from stage %s",
2761                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2762                        string_VkShaderStageFlagBits(pStage->stage))) {
2763                pass = false;
2764            }
2765        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2766                                          /*out*/ required_descriptor_count)) {
2767            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2768                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2769                                                                       "%u.%u: shader uses type `%s`, but the "
2770                                                                       "pipeline layout declares descriptor type %s",
2771                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2772                        string_VkDescriptorType(binding->descriptorType))) {
2773                pass = false;
2774            }
2775        } else if (binding->descriptorCount < required_descriptor_count) {
2776            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2777                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2778                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2779                        required_descriptor_count, use.first.first, use.first.second,
2780                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2781                pass = false;
2782            }
2783        }
2784    }
2785
2786    /* validate use of input attachments against subpass structure */
2787    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2788        std::vector<std::pair<uint32_t, interface_var>> input_attachment_uses;
2789        collect_interface_by_input_attachment_index(report_data, module, accessible_ids, input_attachment_uses);
2790
2791        auto rpci = pipeline->render_pass_ci.ptr();
2792        auto subpass = pipeline->graphicsPipelineCI.subpass;
2793
2794        for (auto use : input_attachment_uses) {
2795            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2796            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
2797                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;
2798
2799            if (index == VK_ATTACHMENT_UNUSED) {
2800                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2801                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2802                            "Shader consumes input attachment index %u, but the subpass does not provide one",
2803                            use.first)) {
2804                    pass = false;
2805                }
2806            }
2807            else if (get_format_type(rpci->pAttachments[index].format) !=
2808                    get_fundamental_type(module, use.second.type_id)) {
2809                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2810                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2811                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
2812                            use.first, string_VkFormat(rpci->pAttachments[index].format),
2813                            describe_type(module, use.second.type_id).c_str())) {
2814                    pass = false;
2815                }
2816            }
2817        }
2818    }
2819
2820    return pass;
2821}
2822
2823
2824// Validate the shaders used by the given pipeline, and record the descriptor slots
2825//  that they actually use into pPipeline->active_slots
2826static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2827                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2828                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2829    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2830    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2831    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2832
2833    shader_module *shaders[5]; // one slot per graphics shader stage (vertex through fragment)
2834    memset(shaders, 0, sizeof(shaders));
2835    spirv_inst_iter entrypoints[5];
2836    memset(entrypoints, 0, sizeof(entrypoints));
2837    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2838    bool pass = true;
2839
2840    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2841        auto pStage = &pCreateInfo->pStages[i];
2842        auto stage_id = get_shader_stage_id(pStage->stage);
2843        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2844                                               &shaders[stage_id], &entrypoints[stage_id],
2845                                               enabledFeatures, shaderModuleMap);
2846    }
2847
2848    vi = pCreateInfo->pVertexInputState;
2849
2850    if (vi) {
2851        pass &= validate_vi_consistency(report_data, vi);
2852    }
2853
2854    if (shaders[vertex_stage]) {
2855        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2856    }
2857
2858    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2859    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2860
2861    while (!shaders[producer] && producer != fragment_stage) {
2862        producer++;
2863        consumer++;
2864    }
2865
2866    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2867        assert(shaders[producer]);
2868        if (shaders[consumer]) {
2869            pass &= validate_interface_between_stages(report_data,
2870                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2871                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2872
2873            producer = consumer;
2874        }
2875    }
2876
2877    if (shaders[fragment_stage]) {
2878        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2879                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
2880    }
2881
2882    return pass;
2883}
2884
2885static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2886                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2887    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2888
2889    shader_module *module;
2890    spirv_inst_iter entrypoint;
2891
2892    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2893                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2894}
2895// Return Set node ptr for specified set or else NULL
2896cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2897    auto set_it = my_data->setMap.find(set);
2898    if (set_it == my_data->setMap.end()) {
2899        return NULL;
2900    }
2901    return set_it->second;
2902}
2903// For the given command buffer, verify and update the state for activeSetBindingsPairs
2904//  This includes:
2905//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2906//     To be valid, the dynamic offset combined with the offset and range from its
2907//     descriptor update must not overflow the size of its buffer being updated
2908//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2909//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2910static bool validate_and_update_drawtime_descriptor_state(
2911    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2912    const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_map<uint32_t, descriptor_req>,
2913                            std::vector<uint32_t> const *>> &activeSetBindingsPairs, const char *function) {
2914    bool result = false;
2915    for (auto set_bindings_pair : activeSetBindingsPairs) {
2916        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2917        std::string err_str;
2918        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2919                                         &err_str)) {
2920            // Report error here
2921            auto set = set_node->GetSet();
2922            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2923                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2924                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
2925                              reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
2926        }
2927        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2928    }
2929    return result;
2930}
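
// Worked example of check #1 above (per the Vulkan spec's rule for dynamic
// descriptors): a UNIFORM_BUFFER_DYNAMIC descriptor written with offset=128 and
// range=64 against a 256-byte buffer accepts a dynamic offset D only while
// 128 + 64 + D <= 256, i.e. D <= 64; binding with D = 128 would overflow the
// buffer and is flagged here at draw time.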
2931
2932// For given pipeline, return number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
2933static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2934    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2935        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2936        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2937    }
2938    return VK_SAMPLE_COUNT_1_BIT;
2939}
2940
2941static void list_bits(std::ostream& s, uint32_t bits) {
2942    for (int i = 0; i < 32 && bits; i++) {
2943        if (bits & (1 << i)) {
2944            s << i;
2945            bits &= ~(1 << i);
2946            if (bits) {
2947                s << ",";
2948            }
2949        }
2950    }
2951}
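
// e.g. list_bits(ss, 0x0000000Au) writes "1,3": set bits are emitted in
// ascending order, comma-separated, with no trailing comma.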
2952
2953// Validate draw-time state related to the PSO
2954static bool validatePipelineDrawtimeState(layer_data const *my_data,
2955                                          LAST_BOUND_STATE const &state,
2956                                          const GLOBAL_CB_NODE *pCB,
2957                                          PIPELINE_NODE const *pPipeline) {
2958    bool skip_call = false;
2959
2960    // Verify Vtx binding
2961    if (!pPipeline->vertexBindingDescriptions.empty()) {
2962        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2963            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
2964            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
2965                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
2966                skip_call |= log_msg(
2967                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2968                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2969                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects this Command Buffer's vertex binding index %u "
2970                    "to be set via vkCmdBindVertexBuffers, because the VkVertexInputBindingDescription "
2971                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
2972                    (uint64_t)state.pipeline, vertex_binding, i, vertex_binding);
2973            }
2974        }
2975    } else {
2976        if (!pCB->currentDrawData.buffers.empty()) {
2977            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2978                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2979                              "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
2980                              ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2981                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2982        }
2983    }
2984    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2985    // Skip check if rasterization is disabled or there is no viewport.
2986    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2987         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2988        pPipeline->graphicsPipelineCI.pViewportState) {
2989        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2990        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2991
2992        if (dynViewport) {
2993            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
2994            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
2995            if (missingViewportMask) {
2996                std::stringstream ss;
2997                ss << "Dynamic viewport(s) ";
2998                list_bits(ss, missingViewportMask);
2999                ss << " are used by PSO, but were not provided via calls to vkCmdSetViewport().";
3000                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3001                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3002                                     "%s", ss.str().c_str());
3003            }
3004        }
3005
3006        if (dynScissor) {
3007            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
3008            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
3009            if (missingScissorMask) {
3010                std::stringstream ss;
3011                ss << "Dynamic scissor(s) ";
3012                list_bits(ss, missingScissorMask);
3013                ss << " are used by PSO, but were not provided via calls to vkCmdSetScissor().";
3014                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3015                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3016                                     "%s", ss.str().c_str());
3017            }
3018        }
3019    }
3020
3021    // Verify that any MSAA request in PSO matches sample# in bound FB
3022    // Skip the check if rasterization is disabled.
3023    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3024        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3025        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
3026        if (pCB->activeRenderPass) {
3027            const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
3028            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
3029            uint32_t i;
3030
3031            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
3032            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
3033                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
3034                skip_call |=
3035                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3036                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3037                                "At draw time in render pass subpass %u, the pipeline's blend state attachment "
3038                                "count %u does not match the subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
3039                                "counts must be equal.",
3040                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
3041                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3042            }
3043
3044            unsigned subpass_num_samples = 0;
3045
3046            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
3047                auto attachment = subpass_desc->pColorAttachments[i].attachment;
3048                if (attachment != VK_ATTACHMENT_UNUSED)
3049                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3050            }
3051
3052            if (subpass_desc->pDepthStencilAttachment &&
3053                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3054                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
3055                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3056            }
3057
3058            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
3059                skip_call |=
3060                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3061                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3062                                "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
3063                                ") uses %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
3064                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
3065                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
3066            }
3067        } else {
3068            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3069                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3070                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
3071                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3072        }
3073    }
3074    // Verify that PSO creation renderPass is compatible with active renderPass
3075    if (pCB->activeRenderPass) {
3076        std::string err_string;
3077        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
3078            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->pCreateInfo, pPipeline->render_pass_ci.ptr(),
3079                                             err_string)) {
3080            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
3081            skip_call |=
3082                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3083                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3084                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
3085                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
3086                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
3087                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
3088        }
3089    }
3090    // TODO : Add more checks here
3091
3092    return skip_call;
3093}
3094
3095// Validate overall state at the time of a draw call
3096static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
3097                                           const VkPipelineBindPoint bindPoint, const char *function) {
3098    bool result = false;
3099    auto const &state = pCB->lastBound[bindPoint];
3100    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
3101    if (nullptr == pPipe) {
3102        result |= log_msg(
3103            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
3104            DRAWSTATE_INVALID_PIPELINE, "DS",
3105            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
3106        // Return early unconditionally: every check below dereferences pPipe
3107        return result;
3109    }
3110    // First check flag states
3111    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3112        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
3113
3114    // Now complete other state checks
3115    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3116        string errorString;
3117        auto pipeline_layout = pPipe->pipeline_layout;
3118
3119        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
3120        vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>> activeSetBindingsPairs;
3121        for (auto & setBindingPair : pPipe->active_slots) {
3122            uint32_t setIndex = setBindingPair.first;
3123            // If a valid set is not bound, flag an error
3124            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
3125                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3126                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
3127                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
3128                                  setIndex);
3129            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
3130                                                        errorString)) {
3131                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
3132                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
3133                result |=
3134                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3135                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
3136                            "VkDescriptorSet (0x%" PRIxLEAST64
3137                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
3138                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
3139                            errorString.c_str());
3140            } else { // Valid set is bound and layout compatible, validate that it's updated
3141                // Pull the set node
3142                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
3143                // Save vector of all active sets to verify dynamicOffsets below
3144                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second,
3145                                                                 &state.dynamicOffsets[setIndex]));
3146                // Make sure set has been updated if it has no immutable samplers
3147                //  If it has immutable samplers, we'll flag error later as needed depending on binding
3148                if (!pSet->IsUpdated()) {
3149                    for (auto binding : setBindingPair.second) {
3150                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding.first)) {
3151                            result |= log_msg(
3152                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3153                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3154                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
3155                                "this will result in undefined behavior.",
3156                                (uint64_t)pSet->GetSet());
3157                        }
3158                    }
3159                }
3160            }
3161        }
3162        // For given active slots, verify any dynamic descriptors and record updated images & buffers
3163        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs, function);
3164    }
3165
3166    // Check general pipeline state that needs to be validated at drawtime
3167    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3168        result |= validatePipelineDrawtimeState(my_data, state, pCB, pPipe);
3169
3170    return result;
3171}
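
// Typical command sequence whose state is validated above (illustrative):
//
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
//                             0, 1, &set, 0, nullptr);  // set #0, no dynamic offsets
//     vkCmdDraw(cb, 3, 1, 0, 0);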
3172
3173// Validate HW line width capabilities prior to setting requested line width.
3174static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3175    bool skip_call = false;
3176
3177    // First check to see if the physical device supports wide lines.
3178    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
3179        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3180                             dsError, "DS", "Attempt to set lineWidth to %f but the physical device's wideLines feature "
3181                                            "is not enabled, so lineWidth must be 1.0f!",
3182                             lineWidth);
3183    } else {
3184        // Otherwise, make sure the width falls in the valid range.
3185        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3186            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3187            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3188                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but the physical device limits line width "
3189                                                          "to the range [%f, %f]!",
3190                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3191                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3192        }
3193    }
3194
3195    return skip_call;
3196}
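
// Example (illustrative, application-side): gate non-default widths on the
// feature and the reported limits:
//
//     if (features.wideLines && 2.0f >= limits.lineWidthRange[0] && 2.0f <= limits.lineWidthRange[1]) {
//         vkCmdSetLineWidth(cmd, 2.0f);
//     }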
3197
3198// Verify that create state for a pipeline is valid
3199static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> const &pPipelines,
3200                                      int pipelineIndex) {
3201    bool skip_call = false;
3202
3203    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3204
3205    // If create derivative bit is set, check that we've specified a base
3206    // pipeline correctly, and that the base pipeline was created to allow
3207    // derivatives.
3208    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3209        PIPELINE_NODE *pBasePipeline = nullptr;
3210        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3211              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3212            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3213                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3214                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3215        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3216            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3217                skip_call |=
3218                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3219                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3220                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3221            } else {
3222                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3223            }
3224        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3225            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3226        }
3227
3228        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3229            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3230                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3231                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3232        }
3233    }
3234
3235    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3236        if (!my_data->phys_dev_properties.features.independentBlend) {
3237            if (pPipeline->attachments.size() > 1) {
3238                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3239                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3240                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3241                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3242                    // only attachment state, so memcmp is best suited for the comparison
3243                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3244                               sizeof(pAttachments[0]))) {
3245                        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3246                                             __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
3247                                             "Invalid Pipeline CreateInfo: If independent blend feature not "
3248                                             "enabled, all elements of pAttachments must be identical");
3249                        break;
3250                    }
3251                }
3252            }
3253        }
3254        if (!my_data->phys_dev_properties.features.logicOp &&
3255            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3256            skip_call |=
3257                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3258                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3259                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3260        }
3261        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3262            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3263             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3264            skip_call |=
3265                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3266                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3267                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3268        }
3269    }
3270
3271    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3272    // produces nonsense errors that confuse users. Other layers should already
3273    // emit errors for renderpass being invalid.
3274    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3275    if (renderPass &&
3276        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
3277        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                                            "is out of range for this renderpass (0..%u)",
                             pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
                                                    my_data->shaderModuleMap)) {
        skip_call = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // Vertex shader is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skip_call |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
    }
    // Either both or neither TC/TE shaders should be defined
    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
    }
    // Compute shaders should be specified independently of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                            "topology for tessellation pipelines");
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                       "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                       "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo State: "
                                 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                 "topology is used. pTessellationState must not be NULL in this case.");
        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                                "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                                "topology used with patchControlPoints value %u."
                                                                                " patchControlPoints should be >0 and <=32.",
                                 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
        }
    }
    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for the shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                            "and scissors are dynamic, the PSO must include "
                                                                            "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                 "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                 pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                 pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                "vkCmdSetViewport().",
                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                         "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                         "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                         "vkCmdSetScissor().",
                                         pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return skip_call;
}
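
// Illustrative, application-side sketch (not part of this layer; all variable
// names below are hypothetical) of a VkGraphicsPipelineCreateInfo fragment that
// satisfies the tessellation checks above: TC and TE stages both present,
// PATCH_LIST topology, and patchControlPoints in (0, 32].
#if 0
VkPipelineInputAssemblyStateCreateInfo ia_state = {};
ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia_state.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; // required when TC/TE stages are active

VkPipelineTessellationStateCreateInfo tess_state = {};
tess_state.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
tess_state.patchControlPoints = 3; // must be >0 and <=32 per the check above

VkGraphicsPipelineCreateInfo pipeline_ci = {};
pipeline_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipeline_ci.pInputAssemblyState = &ia_state;
pipeline_ci.pTessellationState = &tess_state; // must be non-NULL for PATCH_LIST
#endif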

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Return false if the update struct is of a valid type; otherwise flag an error and return the skip value from the callback
static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return false;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return binding_start_index + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return binding_start_index + arrayIndex + count - 1;
}
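
// Worked example for the two index helpers above, assuming a hypothetical
// binding whose descriptors begin at overall layout index 4: a
// VkWriteDescriptorSet with dstArrayElement = 2 and descriptorCount = 3 gives
// getUpdateStartIndex() == 4 + 2 == 6 and getUpdateEndIndex() == 4 + 2 + 3 - 1 == 8,
// so the update touches overall layout indices 6..8.
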
// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skip_call = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return false;
    default:
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skip_call) {
        if (layout_type != actualType) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
        }
    }
    return skip_call;
}
// TODO: Consolidate functions
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}

bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// Find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}

// Find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto img_node = getImageNode(my_data, image);
    if (!img_node)
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. For now it will just ignore
    // potential errors in this case.
    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    // The pair already carries the image handle, so pass only the pair to the
    // two-argument-plus-layout overload above.
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    SetLayout(pObject, imgpair, layout);
}

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto iv_data = getImageViewData(dev_data, imageView);
    assert(iv_data);
    const VkImage &image = iv_data->image;
    const VkImageSubresourceRange &subRange = iv_data->subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as
            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
            // is OK for descriptor set layout validation
            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (vk_format_is_depth_and_stencil(iv_data->format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            SetLayout(pCB, image, sub, layout);
        }
    }
}
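
// Example of the aspect widening above: for an image view created over a
// combined depth/stencil image (e.g. VK_FORMAT_D24_UNORM_S8_UINT) whose
// subresourceRange names only VK_IMAGE_ASPECT_DEPTH_BIT, each recorded
// subresource gets VK_IMAGE_ASPECT_STENCIL_BIT OR'd in as well, since the
// aspect mask is ignored by layout transitions and both aspects move together.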

// Validate that the given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if a validation error occurs and the callback returns true (to skip the upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
    bool skip_call = false;
    auto set_node = my_data->setMap.find(set);
    if (set_node == my_data->setMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        if (set_node->second->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
                                 func_str.c_str(), (uint64_t)(set));
        }
    }
    return skip_call;
}
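
// Typical application-side sequence that trips the in-use check above
// (illustrative only; cmd_buf, queue, submit_info, etc. are hypothetical):
#if 0
vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, &set, 0, nullptr);
vkEndCommandBuffer(cmd_buf);
vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
// Freeing the set before the submission completes is reported as DRAWSTATE_OBJECT_INUSE:
vkFreeDescriptorSets(device, pool, 1, &set);
#endif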

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
    // TODO: validate flags
    // For every set off of this pool, clear it, remove it from setMap, and free the cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(my_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}
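
// clearDescriptorPool() backs vkResetDescriptorPool(): every set allocated from
// the pool is implicitly freed and the per-type counters return to their
// creation-time maximums. A minimal application-side sketch (hypothetical handles):
#if 0
vkResetDescriptorPool(device, pool, 0); // flags is currently reserved; must be 0
// Every VkDescriptorSet previously allocated from 'pool' is now invalid.
#endif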

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
    if (pPool) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // Init cmd node and append it to the end of the cmd list
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
    return skip_call;
}
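
// Example of the capability checks in addCmd(): recording vkCmdDispatch() into a
// command buffer whose pool was created over a graphics-only queue family
// (VK_QUEUE_GRAPHICS_BIT set, VK_QUEUE_COMPUTE_BIT clear) fails checkComputeBit()
// and is reported as DRAWSTATE_INVALID_COMMAND_BUFFER.
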
// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    switch (object->type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(dev_data, reinterpret_cast<const VkImage &>(object->handle));
        if (img_node)
            img_node->cb_bindings.erase(cb_node);
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buf_node = getBufferNode(dev_data, reinterpret_cast<const VkBuffer &>(object->handle));
        if (buf_node)
            buf_node->cb_bindings.erase(cb_node);
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
        auto evt_node = getEventNode(dev_data, reinterpret_cast<const VkEvent &>(object->handle));
        if (evt_node)
            evt_node->cb_bindings.erase(cb_node);
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
        auto qp_node = getQueryPoolNode(dev_data, reinterpret_cast<const VkQueryPool &>(object->handle));
        if (qp_node)
            qp_node->cb_bindings.erase(cb_node);
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
        auto pipe_node = getPipeline(dev_data, reinterpret_cast<const VkPipeline &>(object->handle));
        if (pipe_node)
            pipe_node->cb_bindings.erase(cb_node);
        break;
    }
    default:
        assert(0); // unhandled object type
    }
}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
                set->RemoveBoundCommandBuffer(pCB);
            }
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_node = getFramebuffer(dev_data, framebuffer);
            if (fb_node)
                fb_node->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status |= CBSTATUS_ALL;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_VIEWPORT:
                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
                break;
            case VK_DYNAMIC_STATE_SCISSOR:
                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
                break;
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}
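
// Worked example for set_cb_pso_status(): binding a PSO whose pDynamicStates
// array lists only VK_DYNAMIC_STATE_VIEWPORT leaves CBSTATUS_VIEWPORT_SET clear
// in the command buffer's status mask, so a draw is flagged unless the app has
// recorded vkCmdSetViewport() first; every other CBSTATUS_* bit is OR'd in as
// statically satisfied by the pipeline.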

// Print the last bound Gfx Pipeline
static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (!pPipeTrav) {
            // nothing to print
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                 __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                 vk_print_vkgraphicspipelinecreateinfo(
                                     reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
                                     .c_str());
        }
    }
    return skip_call;
}

static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && !pCB->cmds.empty()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
        vector<CMD_NODE> cmds = pCB->cmds;
        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
            // TODO : Need to pass cb as srcObj here
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
        }
    } else {
        // Nothing to print
    }
}

static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return skip_call;
    }
    skip_call |= printPipeline(my_data, cb);
    return skip_call;
}

// Flags a validation error if the associated call is made inside a render pass. The API
// routine named by apiName should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
                         (uint64_t)pCB->activeRenderPass->renderPass);
    }
    return inside;
}

// Flags a validation error if the associated call is made outside a render pass. The API
// routine named by apiName should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
                          "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}

static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    instance_data->instance = *pInstance;
    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data =
        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
                                     pCreateInfo->ppEnabledExtensionNames);
    init_core_validation(instance_data, pAllocator);

    instance_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());
    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

/* hook DestroyInstance to remove tableInstanceMap entry */
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callbacks, if any
    while (!my_data->logging_callback.empty()) {
        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
        my_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(my_data->report_data);
    delete my_data->instance_dispatch_table;
    layer_data_map.erase(key);
}

static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
    }
}

// Verify that queue families have been properly requested
bool ValidateRequestedQueueFamilyProperties(layer_data *dev_data, const VkDeviceCreateInfo *create_info) {
    bool skip_call = false;
    // First check whether the app has actually requested queueFamilyProperties
    if (!dev_data->physical_device_state) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else if (QUERY_DETAILS != dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        // TODO: This is not called out as an invalid use in the spec, so make a more informative recommendation.
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
    } else {
        // Check that the requested queue properties are valid
        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
            if (dev_data->queue_family_properties.size() <=
                requestedIndex) { // requested index is out of bounds for this physical device
                skip_call |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
            } else if (create_info->pQueueCreateInfos[i].queueCount >
                       dev_data->queue_family_properties[requestedIndex]->queueCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
                            "requested queueCount is %u.",
                            requestedIndex, dev_data->queue_family_properties[requestedIndex]->queueCount,
                            create_info->pQueueCreateInfos[i].queueCount);
            }
        }
    }
    return skip_call;
}
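
// Application-side sketch of the query order the check above expects
// (gpu, queue_ci, etc. are hypothetical); the requested queueFamilyIndex and
// queueCount must fall within what the physical device reported:
#if 0
uint32_t family_count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(gpu, &family_count, nullptr);
std::vector<VkQueueFamilyProperties> families(family_count);
vkGetPhysicalDeviceQueueFamilyProperties(gpu, &family_count, families.data());

VkDeviceQueueCreateInfo queue_ci = {};
queue_ci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_ci.queueFamilyIndex = 0;                 // must be < family_count
queue_ci.queueCount = families[0].queueCount;  // must not exceed the reported count
#endif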

// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(layer_data *dev_data, const VkPhysicalDeviceFeatures *requested_features) {
    bool skip_call = false;

    VkBool32 *actual = reinterpret_cast<VkBool32 *>(&(dev_data->physical_device_features));
    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues
    //  Need to provide the struct member name with the issue. To do that seems like we'll
    //  have to loop through each struct member, which should be done w/ codegen to keep in sync.
    uint32_t errors = 0;
    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < total_bools; i++) {
        if (requested[i] > actual[i]) {
            // TODO: Add an index-to-struct-member-name helper to be able to include the feature name
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
                "which is not available on this device.",
                i);
            errors++;
        }
    }
    if (errors && (UNCALLED == dev_data->physical_device_state->vkGetPhysicalDeviceFeaturesState)) {
        // If the user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                             "DL", "You requested features that are unavailable on this device. You should first query feature "
                                   "availability by calling vkGetPhysicalDeviceFeatures().");
    }
    return skip_call;
}
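
// Application-side sketch of the recommended pattern (device_ci and gpu are
// hypothetical): query supported features first, then enable only what was reported.
#if 0
VkPhysicalDeviceFeatures supported = {};
vkGetPhysicalDeviceFeatures(gpu, &supported);
VkPhysicalDeviceFeatures enabled = {};
enabled.tessellationShader = supported.tessellationShader; // request only if available
device_ci.pEnabledFeatures = &enabled;
#endif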
4284
4285VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4286                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4287    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4288    bool skip_call = false;
4289
4290    // Check that any requested features are available
4291    if (pCreateInfo->pEnabledFeatures) {
4292        skip_call |= ValidateRequestedFeatures(my_instance_data, pCreateInfo->pEnabledFeatures);
4293    }
4294    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, pCreateInfo);
4295
4296    if (skip_call) {
4297        return VK_ERROR_VALIDATION_FAILED_EXT;
4298    }
4299
4300    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4301
4302    assert(chain_info->u.pLayerInfo);
4303    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4304    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4305    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4306    if (fpCreateDevice == NULL) {
4307        return VK_ERROR_INITIALIZATION_FAILED;
4308    }
4309
4310    // Advance the link info for the next element on the chain
4311    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4312
4313    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4314    if (result != VK_SUCCESS) {
4315        return result;
4316    }
4317
4318    std::unique_lock<std::mutex> lock(global_lock);
4319    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4320
4321    // Setup device dispatch table
4322    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4323    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4324    my_device_data->device = *pDevice;
4325
4326    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4327    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
4328    // Get physical device limits for this device
4329    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4330    uint32_t count;
4331    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4332    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4333    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4334        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4335    // TODO: device limits should make sure these are compatible
4336    if (pCreateInfo->pEnabledFeatures) {
4337        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
4338    } else {
4339        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4340    }
4341    // Store physical device mem limits into device layer_data struct
4342    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4343    lock.unlock();
4344
4345    ValidateLayerOrdering(*pCreateInfo);
4346
4347    return result;
4348}
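// Example (illustrative sketch, not part of the layer; 'gpu' and 'device' below are
// hypothetical app-side variables): the feature validation above is satisfied by
// querying support first and enabling only features the device actually reports.
#if 0
VkPhysicalDeviceFeatures supported = {};
vkGetPhysicalDeviceFeatures(gpu, &supported);             // query before enabling
VkPhysicalDeviceFeatures enabled = {};
enabled.samplerAnisotropy = supported.samplerAnisotropy;  // request only if available
VkDeviceCreateInfo device_ci = {};
device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_ci.pEnabledFeatures = &enabled;
// ... fill in queue create infos and extensions, then call vkCreateDevice(gpu, &device_ci, nullptr, &device)
#endif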
4349
4350// prototype
4351static void deleteRenderPasses(layer_data *);
4352VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4353    // TODOSC : Shouldn't need any customization here
4354    dispatch_key key = get_dispatch_key(device);
4355    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4356    // Free all the memory
4357    std::unique_lock<std::mutex> lock(global_lock);
4358    deletePipelines(dev_data);
4359    deleteRenderPasses(dev_data);
4360    deleteCommandBuffers(dev_data);
4361    // This will also delete all sets in the pool & remove them from setMap
4362    deletePools(dev_data);
4363    // All sets should be removed
4364    assert(dev_data->setMap.empty());
4365    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4366        delete del_layout.second;
4367    }
4368    dev_data->descriptorSetLayoutMap.clear();
4369    dev_data->imageViewMap.clear();
4370    dev_data->imageMap.clear();
4371    dev_data->imageSubresourceMap.clear();
4372    dev_data->imageLayoutMap.clear();
4373    dev_data->bufferViewMap.clear();
4374    dev_data->bufferMap.clear();
4375    // Queues persist until device is destroyed
4376    dev_data->queueMap.clear();
4377    lock.unlock();
4378#if MTMERGESOURCE
4379    bool skip_call = false;
4380    lock.lock();
4381    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4382            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4383    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4384            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4385    print_mem_list(dev_data);
4386    printCBList(dev_data);
4387    // Report any memory leaks
4388    DEVICE_MEM_INFO *pInfo = NULL;
4389    if (!dev_data->memObjMap.empty()) {
4390        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4391            pInfo = (*ii).second.get();
4392            if (pInfo->alloc_info.allocationSize != 0) {
4393                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4394                skip_call |=
4395                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4396                            (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
4397                            "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4398                            "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4399                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4400            }
4401        }
4402    }
4403    layer_debug_report_destroy_device(device);
4404    lock.unlock();
4405
4406#if DISPATCH_MAP_DEBUG
4407    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4408#endif
4409    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4410    if (!skip_call) {
4411        pDisp->DestroyDevice(device, pAllocator);
4412    }
4413#else
4414    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4415#endif
4416    delete dev_data->device_dispatch_table;
4417    layer_data_map.erase(key);
4418}
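// Example (illustrative sketch; 'device' and 'mem' are hypothetical app-side handles):
// the MEMTRACK_MEMORY_LEAK report above is avoided by freeing every allocation first.
#if 0
vkFreeMemory(device, mem, nullptr); // repeat for each outstanding VkDeviceMemory
vkDestroyDevice(device, nullptr);   // now no leak is reported
#endif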
4419
4420static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4421
4422// Validate that the initial layout specified in the command buffer for the
4423// IMAGE matches the globally tracked layout for that IMAGE
4425static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4426    bool skip_call = false;
4427    for (auto cb_image_data : pCB->imageLayoutMap) {
4428        VkImageLayout imageLayout;
4429        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4430            skip_call |=
4431                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4432                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4433                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4434        } else {
4435            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4436                // TODO: Set memory invalid which is in mem_tracker currently
4437            } else if (imageLayout != cb_image_data.second.initialLayout) {
4438                if (cb_image_data.first.hasSubresource) {
4439                    skip_call |= log_msg(
4440                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4441                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4442                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X, array layer %u, "
4443                        "mip level %u], with layout %s when first use is %s.",
4444                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4445                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
4446                        string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
4448                } else {
4449                    skip_call |= log_msg(
4450                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4451                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4452                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4453                        "first use is %s.",
4454                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4455                        string_VkImageLayout(cb_image_data.second.initialLayout));
4456                }
4457            }
4458            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4459        }
4460    }
4461    return skip_call;
4462}
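// Example (illustrative sketch; 'cmd_buffer' and 'image' are hypothetical app-side
// handles): the DRAWSTATE_INVALID_IMAGE_LAYOUT error above is typically resolved by
// recording a barrier that transitions the image into the layout of its first use.
#if 0
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;            // the tracked global layout
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; // layout of first use in this CB
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(cmd_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                     0, 0, nullptr, 0, nullptr, 1, &barrier);
#endif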
4463
4464// Track which resources are in-flight by atomically incrementing their "in_use" count
4465static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4466    bool skip_call = false;
4467
4468    pCB->in_use.fetch_add(1);
4469    my_data->globalInFlightCmdBuffers.insert(pCB->commandBuffer);
4470
4471    for (auto drawDataElement : pCB->drawData) {
4472        for (auto buffer : drawDataElement.buffers) {
4473            auto buffer_node = getBufferNode(my_data, buffer);
4474            if (!buffer_node) {
4475                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4476                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4477                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4478            } else {
4479                buffer_node->in_use.fetch_add(1);
4480            }
4481        }
4482    }
4483    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4484        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4485            if (!my_data->setMap.count(set->GetSet())) {
4486                skip_call |=
4487                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4488                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4489                            "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set));
4490            } else {
4491                set->in_use.fetch_add(1);
4492            }
4493        }
4494    }
4495    for (auto event : pCB->events) {
4496        auto event_node = getEventNode(my_data, event);
4497        if (!event_node) {
4498            skip_call |=
4499                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4500                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4501                        "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event));
4502        } else {
4503            event_node->in_use.fetch_add(1);
4504        }
4505    }
4506    for (auto event : pCB->writeEventsBeforeWait) {
4507        auto event_node = getEventNode(my_data, event);
4508        if (event_node)
4509            event_node->write_in_use++;
4510    }
4511    return skip_call;
4512}
4513
4514// Note: This function assumes that the global lock is held by the calling
4515// thread.
4516// TODO: untangle this.
4517static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4518    bool skip_call = false;
4519    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4520    if (pCB) {
4521        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4522            for (auto event : queryEventsPair.second) {
4523                if (my_data->eventMap[event].needsSignaled) {
4524                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4525                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4526                                         "Cannot get query results on queryPool 0x%" PRIx64
4527                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4528                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4529                }
4530            }
4531        }
4532    }
4533    return skip_call;
4534}
4535
4536// TODO: nuke this completely.
4537// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4538static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4539    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4540    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4541    pCB->in_use.fetch_sub(1);
4542    if (!pCB->in_use.load()) {
4543        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4544    }
4545}
4546
4547
4548static bool RetireWorkOnQueue(layer_data *dev_data, QUEUE_NODE *pQueue, uint64_t seq)
4549{
4550    bool skip_call = false; // TODO: extract everything that might fail to precheck
4551    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4552
4553    // Roll this queue forward, one submission at a time.
4554    while (pQueue->seq < seq) {
4555        auto & submission = pQueue->submissions.front();
4556
4557        for (auto & wait : submission.waitSemaphores) {
4558            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4559            pSemaphore->in_use.fetch_sub(1);
4560            auto & lastSeq = otherQueueSeqs[wait.queue];
4561            lastSeq = std::max(lastSeq, wait.seq);
4562        }
4563
4564        for (auto & semaphore : submission.signalSemaphores) {
4565            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4566            pSemaphore->in_use.fetch_sub(1);
4567        }
4568
4569        for (auto cb : submission.cbs) {
4570            auto pCB = getCBNode(dev_data, cb);
4571            for (auto drawDataElement : pCB->drawData) {
4572                for (auto buffer : drawDataElement.buffers) {
4573                    auto buffer_node = getBufferNode(dev_data, buffer);
4574                    if (buffer_node) {
4575                        buffer_node->in_use.fetch_sub(1);
4576                    }
4577                }
4578            }
4579            for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4580                for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4581                    set->in_use.fetch_sub(1);
4582                }
4583            }
4584            for (auto event : pCB->events) {
4585                auto eventNode = dev_data->eventMap.find(event);
4586                if (eventNode != dev_data->eventMap.end()) {
4587                    eventNode->second.in_use.fetch_sub(1);
4588                }
4589            }
4590            for (auto event : pCB->writeEventsBeforeWait) {
4591                auto eventNode = dev_data->eventMap.find(event);
4592                if (eventNode != dev_data->eventMap.end()) {
4593                    eventNode->second.write_in_use--;
4594                }
4595            }
4596            for (auto queryStatePair : pCB->queryToStateMap) {
4597                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4598            }
4599            for (auto eventStagePair : pCB->eventToStageMap) {
4600                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4601            }
4602
4603            skip_call |= cleanInFlightCmdBuffer(dev_data, cb);
4604            removeInFlightCmdBuffer(dev_data, cb);
4605        }
4606
4607        auto pFence = getFenceNode(dev_data, submission.fence);
4608        if (pFence) {
4609            pFence->state = FENCE_RETIRED;
4610        }
4611
4612        pQueue->submissions.pop_front();
4613        pQueue->seq++;
4614    }
4615
4616    // Roll other queues forward to the highest seq we saw a wait for
4617    for (auto qs : otherQueueSeqs) {
4618        skip_call |= RetireWorkOnQueue(dev_data, getQueueNode(dev_data, qs.first), qs.second);
4619    }
4620
4621    return skip_call;
4622}
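// Worked example of the retire logic above: if a fence was signaled by queue A at
// seq 7, RetireWorkOnQueue(A, 7) pops submissions until A's seq reaches 7; any
// semaphore wait on queue B recorded in those submissions (say wait.seq == 3) is
// collected in otherQueueSeqs, so queue B is then rolled forward through seq 3 too.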
4623
4624
4625// Submit a fence to a queue, delimiting previous fences and previous untracked
4626// work by it.
4627static void
4628SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence, uint64_t submitCount)
4629{
4630    pFence->state = FENCE_INFLIGHT;
4631    pFence->signaler.first = pQueue->queue;
4632    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4633}
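// Worked example: with pQueue->seq == 5, two submissions still pending, and a
// vkQueueSubmit() of three batches, the fence's signaler is recorded as
// (queue, 5 + 2 + 3) == (queue, 10); the fence retires once the queue's seq reaches 10.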
4634
4635static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4636    bool skip_call = false;
4637    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4638        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4639        skip_call |=
4640            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4641                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4642                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4643                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4644    }
4645    return skip_call;
4646}
4647
4648static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4649    bool skip_call = false;
4650    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4651    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4652        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4653                             0, __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4654                             "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4655                             "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4656                             (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4657    }
4658    // Validate that cmd buffers have been updated
4659    if (CB_RECORDED != pCB->state) {
4660        if (CB_INVALID == pCB->state) {
4661            // Inform app of reason CB invalid
4662            for (auto obj : pCB->broken_bindings) {
4663                const char *type_str = object_type_to_string(obj.type);
4664                // Descriptor sets are a special case: they can be either destroyed or updated, and either invalidates a CB
4665                const char *cause_str =
4666                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4667
4668                skip_call |=
4669                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4670                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4671                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
4672                            " was %s.",
4673                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
4674            }
4675        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4676            skip_call |=
4677                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4678                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4679                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
4680                        (uint64_t)(pCB->commandBuffer));
4681        }
4682    }
4683    return skip_call;
4684}
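// Example (illustrative sketch; 'cmd_buffer' is a hypothetical app-side handle): a CB
// begun with ONE_TIME_SUBMIT must be re-recorded between submissions to avoid the
// DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION error above.
#if 0
VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(cmd_buffer, &begin_info); // re-begin resets the CB (pool must allow individual reset)
// ... record commands ...
vkEndCommandBuffer(cmd_buffer);
// submit exactly once, then begin/record again before the next submit
#endif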
4685
4686// Validate that queueFamilyIndices of primary command buffers match this queue
4687// Secondary command buffers were previously validated in vkCmdExecuteCommands().
4688static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4689    bool skip_call = false;
4690    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4691    auto queue_node = getQueueNode(dev_data, queue);
4692
4693    if (pPool && queue_node && (pPool->queueFamilyIndex != queue_node->queueFamilyIndex)) {
4694        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4695            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
4696            "vkQueueSubmit: Primary command buffer 0x%" PRIxLEAST64
4697            " created in queue family %d is being submitted on queue 0x%" PRIxLEAST64 " from queue family %d.",
4698            reinterpret_cast<uint64_t>(pCB->commandBuffer), pPool->queueFamilyIndex,
4699            reinterpret_cast<uint64_t>(queue), queue_node->queueFamilyIndex);
4700    }
4701
4702    return skip_call;
4703}
4704
4705static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4706    // Track in-use for resources off of primary and any secondary CBs
4707    bool skip_call = false;
4708
4709    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4710    // on device
4711    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4712
4713    skip_call |= validateAndIncrementResources(dev_data, pCB);
4714
4715    if (!pCB->secondaryCommandBuffers.empty()) {
4716        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4717            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4718            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
4719            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4720                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4721                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4722                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4723                        "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
4724                        " but that buffer has subsequently been bound to "
4725                        "primary cmd buffer 0x%" PRIxLEAST64
4726                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
4727                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
4728                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
4729            }
4730        }
4731    }
4732
4733    skip_call |= validateCommandBufferState(dev_data, pCB);
4734
4735    return skip_call;
4736}
4737
4738static bool
4739ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
4740{
4741    bool skip_call = false;
4742
4743    if (pFence) {
4744        if (pFence->state == FENCE_INFLIGHT) {
4745            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4746                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4747                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4748        }
4749
4750        else if (pFence->state == FENCE_RETIRED) {
4751            skip_call |=
4752                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4753                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4754                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
4755                        reinterpret_cast<uint64_t &>(pFence->fence));
4756        }
4757    }
4758
4759    return skip_call;
4760}
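// Example (illustrative sketch; handles are hypothetical app-side variables): both
// fence errors above are avoided by waiting and resetting before reuse.
#if 0
vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // ensure it is not still in flight
vkResetFences(device, 1, &fence);                        // return it to the unsignaled state
vkQueueSubmit(queue, 1, &submit_info, fence);            // now safe to submit with it again
#endif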
4761
4762
4763VKAPI_ATTR VkResult VKAPI_CALL
4764QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4765    bool skip_call = false;
4766    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4767    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4768    std::unique_lock<std::mutex> lock(global_lock);
4769
4770    auto pQueue = getQueueNode(dev_data, queue);
4771    auto pFence = getFenceNode(dev_data, fence);
4772    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
4773
4774    if (skip_call) {
4775        return VK_ERROR_VALIDATION_FAILED_EXT;
4776    }
4777
4778    // TODO : Review these old print functions and clean up as appropriate
4779    print_mem_list(dev_data);
4780    printCBList(dev_data);
4781
4782    // Mark the fence in-use.
4783    if (pFence) {
4784        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
4785    }
4786
4787    // Now verify each individual submit
4788    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4789        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4790        vector<SEMAPHORE_WAIT> semaphore_waits;
4791        vector<VkSemaphore> semaphore_signals;
4792        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4793            VkSemaphore semaphore = submit->pWaitSemaphores[i];
4794            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4795            if (pSemaphore) {
4796                if (pSemaphore->signaled) {
4797                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
4798                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
4799                        pSemaphore->in_use.fetch_add(1);
4800                    }
4801                    pSemaphore->signaler.first = VK_NULL_HANDLE;
4802                    pSemaphore->signaled = false;
4803                } else {
4804                    skip_call |=
4805                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4806                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4807                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
4808                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
4809                }
4810            }
4811        }
4812        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4813            VkSemaphore semaphore = submit->pSignalSemaphores[i];
4814            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4815            if (pSemaphore) {
4816                if (pSemaphore->signaled) {
4817                    skip_call |=
4818                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4819                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4820                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
4821                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4822                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
4823                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
4824                } else {
4825                    pSemaphore->signaler.first = queue;
4826                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
4827                    pSemaphore->signaled = true;
4828                    pSemaphore->in_use.fetch_add(1);
4829                    semaphore_signals.push_back(semaphore);
4830                }
4831            }
4832        }
4833
4834        std::vector<VkCommandBuffer> cbs;
4835
4836        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4837            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
4838            skip_call |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
4839            if (pCBNode) {
4840                cbs.push_back(submit->pCommandBuffers[i]);
4841                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
4842                    cbs.push_back(secondaryCmdBuffer);
4843                }
4844
4845                pCBNode->submitCount++; // increment submit count
4846                skip_call |= validatePrimaryCommandBufferState(dev_data, pCBNode);
4847                skip_call |= validateQueueFamilyIndices(dev_data, pCBNode, queue);
4848                // Call submit-time functions to validate/update state
4849                for (auto &function : pCBNode->validate_functions) {
4850                    skip_call |= function();
4851                }
4852                for (auto &function : pCBNode->eventUpdates) {
4853                    skip_call |= function(queue);
4854                }
4855                for (auto &function : pCBNode->queryUpdates) {
4856                    skip_call |= function(queue);
4857                }
4858            }
4859        }
4860
4861        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
4862                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
4863    }
4864
4865    if (pFence && !submitCount) {
4866        // If no submissions, but just dropping a fence on the end of the queue,
4867        // record an empty submission with just the fence, so we can determine
4868        // its completion.
4869        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
4870                                         std::vector<SEMAPHORE_WAIT>(),
4871                                         std::vector<VkSemaphore>(),
4872                                         fence);
4873    }
4874
4875    lock.unlock();
4876    if (!skip_call)
4877        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
4878
4879    return result;
4880}
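// Example (illustrative sketch; 'sem' and 'queue' are hypothetical app-side handles):
// the forward-progress checks above expect every wait to be matched by a prior signal,
// e.g. batch 0 signals a semaphore that batch 1 of the same call waits on.
#if 0
VkSubmitInfo submits[2] = {};
submits[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submits[0].signalSemaphoreCount = 1;
submits[0].pSignalSemaphores = &sem;
VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
submits[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submits[1].waitSemaphoreCount = 1;
submits[1].pWaitSemaphores = &sem;
submits[1].pWaitDstStageMask = &wait_stage;
vkQueueSubmit(queue, 2, submits, VK_NULL_HANDLE);
#endif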
4881
4882VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4883                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
4884    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4885    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4886    // TODO : Track allocations and overall size here
4887    std::lock_guard<std::mutex> lock(global_lock);
4888    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
4889    print_mem_list(my_data);
4890    return result;
4891}
4892
4893VKAPI_ATTR void VKAPI_CALL
4894FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4895    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4896
4897    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
4898    // Before freeing a memory object, an application must ensure the memory object is no longer
4899    // in use by the device—for example by command buffers queued for execution. The memory need
4900    // not yet be unbound from all images and buffers, but any further use of those images or
4901    // buffers (on host or device) for anything other than destroying those objects will result in
4902    // undefined behavior.
4903
4904    std::unique_lock<std::mutex> lock(global_lock);
4905    bool skip_call = freeMemObjInfo(my_data, device, mem, false);
4906    print_mem_list(my_data);
4907    printCBList(my_data);
4908    lock.unlock();
4909    if (!skip_call) {
4910        my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
4911    }
4912}
4913
4914// Validate a given Map memory range: the memory must not already be mapped,
4915//  and the size of the map range must be:
4916//  1. Non-zero
4917//  2. Within the size of the memory allocation
4918static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4919    bool skip_call = false;
4920
4921    if (size == 0) {
4922        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4923                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4924                            "VkMapMemory: Attempting to map memory range of size zero");
4925    }
4926
4927    auto mem_element = my_data->memObjMap.find(mem);
4928    if (mem_element != my_data->memObjMap.end()) {
4929        auto mem_info = mem_element->second.get();
4930        // It is an application error to call VkMapMemory on an object that is already mapped
4931        if (mem_info->mem_range.size != 0) {
4932            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4933                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4934                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
4935        }
4936
4937        // Validate that offset + size is within object's allocationSize
4938        if (size == VK_WHOLE_SIZE) {
4939            if (offset >= mem_info->alloc_info.allocationSize) {
4940                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4941                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
4942                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
4943                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
4944                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
4945            }
4946        } else {
4947            if ((offset + size) > mem_info->alloc_info.allocationSize) {
4948                skip_call |=
4949                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4950                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4951                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
4952                            size + offset, mem_info->alloc_info.allocationSize);
4953            }
4954        }
4955    }
4956    return skip_call;
4957}
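// Example (illustrative sketch; 'mem', 'src', and 'size' are hypothetical app-side
// variables): a mapping that passes the checks above uses a non-zero size (or
// VK_WHOLE_SIZE) that stays within allocationSize, and unmaps before remapping.
#if 0
void *data = nullptr;
vkMapMemory(device, mem, 0 /*offset*/, VK_WHOLE_SIZE, 0 /*flags*/, &data);
memcpy(data, src, static_cast<size_t>(size));
vkUnmapMemory(device, mem); // required before mapping this object again
#endif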
4958
4959static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4960    auto mem_info = getMemObjInfo(my_data, mem);
4961    if (mem_info) {
4962        mem_info->mem_range.offset = offset;
4963        mem_info->mem_range.size = size;
4964    }
4965}
4966
4967static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
4968    bool skip_call = false;
4969    auto mem_info = getMemObjInfo(my_data, mem);
4970    if (mem_info) {
4971        if (!mem_info->mem_range.size) {
4972            // Valid Usage: memory must currently be mapped
4973            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4974                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4975                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
4976        }
4977        mem_info->mem_range.size = 0;
4978        if (mem_info->shadow_copy) {
4979            free(mem_info->shadow_copy_base);
4980            mem_info->shadow_copy_base = 0;
4981            mem_info->shadow_copy = 0;
4982        }
4983    }
4984    return skip_call;
4985}
4986
4987// Guard value for pad data
4988static char NoncoherentMemoryFillValue = 0xb;
4989
4990static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
4991                                     void **ppData) {
4992    auto mem_info = getMemObjInfo(dev_data, mem);
4993    if (mem_info) {
4994        mem_info->p_driver_data = *ppData;
4995        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
4996        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
4997            mem_info->shadow_copy = 0;
4998        } else {
4999            if (size == VK_WHOLE_SIZE) {
5000                size = mem_info->alloc_info.allocationSize - offset;
5001            }
5002            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5003            assert(vk_safe_modulo(mem_info->shadow_pad_size,
5004                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
5005            // Ensure start of mapped region reflects hardware alignment constraints
5006            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5007
5008            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
5009            uint64_t start_offset = offset % map_alignment;
5010            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
5011            mem_info->shadow_copy_base = malloc(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset);
5012
5013            mem_info->shadow_copy =
5014                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
5015                                         ~(map_alignment - 1)) + start_offset;
5016            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
5017                                  map_alignment) == 0);
5018
5019            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, 2 * mem_info->shadow_pad_size + size);
5020            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
5021        }
5022    }
5023}
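// Worked example of the guardband math above: with minMemoryMapAlignment == 64 and
// offset == 100, start_offset == 100 % 64 == 36. malloc() receives
// 2*64 + size + 64 + 36 bytes; shadow_copy is the base rounded up to a 64-byte
// boundary plus 36, and *ppData == shadow_copy + 64, so (*ppData - offset) lands
// exactly on a 64-byte boundary as the spec requires.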
5024
5025// Verify that the state of a fence being waited on is appropriate. That is, the
5026//  fence should have been submitted on a queue or during acquire next image; a
5027//  fence that was never submitted triggers a warning below
5028static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
5029    bool skip_call = false;
5030
5031    auto pFence = getFenceNode(dev_data, fence);
5032    if (pFence) {
5033        if (pFence->state == FENCE_UNSIGNALED) {
5034            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5035                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5036                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
5037                                 "acquire next image.",
5038                                 apiCall, reinterpret_cast<uint64_t &>(fence));
5039        }
5040    }
5041    return skip_call;
5042}
5043
5044VKAPI_ATTR VkResult VKAPI_CALL
5045WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5046    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5047    bool skip_call = false;
5048    // Verify fence status of submitted fences
5049    std::unique_lock<std::mutex> lock(global_lock);
5050    for (uint32_t i = 0; i < fenceCount; i++) {
5051        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
5052    }
5053    lock.unlock();
5054    if (skip_call)
5055        return VK_ERROR_VALIDATION_FAILED_EXT;
5056
5057    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5058
5059    if (result == VK_SUCCESS) {
5060        lock.lock();
5061        // When we know that all fences are complete we can clean/remove their CBs
5062        if (waitAll || fenceCount == 1) {
5063            for (uint32_t i = 0; i < fenceCount; i++) {
5064                auto pFence = getFenceNode(dev_data, pFences[i]);
5065                if (pFence->signaler.first != VK_NULL_HANDLE) {
5066                    skip_call |= RetireWorkOnQueue(dev_data,
5067                                                   getQueueNode(dev_data, pFence->signaler.first),
5068                                                   pFence->signaler.second);
5069                }
5070            }
5071        }
5072        // NOTE : An alternate case not handled here is when some (but not all) fences have
5073        //  completed. In that case, for the app to determine which fences completed, it must
5074        //  call vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
5075        lock.unlock();
5076    }
5077    if (skip_call)
5078        return VK_ERROR_VALIDATION_FAILED_EXT;
5079    return result;
5080}
5081
5082VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
5083    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5084    bool skip_call = false;
5085    std::unique_lock<std::mutex> lock(global_lock);
5086    skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
5087    lock.unlock();
5088
5089    if (skip_call)
5090        return VK_ERROR_VALIDATION_FAILED_EXT;
5091
5092    VkResult result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5093    lock.lock();
5094    if (result == VK_SUCCESS) {
5095        auto pFence = getFenceNode(dev_data, fence);
5096        if (pFence->signaler.first != VK_NULL_HANDLE) {
5097            skip_call |= RetireWorkOnQueue(dev_data,
5098                                           getQueueNode(dev_data, pFence->signaler.first),
5099                                           pFence->signaler.second);
5100        }
5101    }
5102    lock.unlock();
5103    if (skip_call)
5104        return VK_ERROR_VALIDATION_FAILED_EXT;
5105    return result;
5106}
5107
5108VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5109                                                            VkQueue *pQueue) {
5110    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5111    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5112    std::lock_guard<std::mutex> lock(global_lock);
5113
5114    // Add queue to tracking set only if it is new
5115    auto result = dev_data->queues.emplace(*pQueue);
5116    if (result.second) {
5117        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5118        pQNode->queue = *pQueue;
5119        pQNode->queueFamilyIndex = queueFamilyIndex;
5120        pQNode->seq = 0;
5121    }
5122}
5123
5124VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
5125    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5126    bool skip_call = false;
5127    std::unique_lock<std::mutex> lock(global_lock);
5128    auto pQueue = getQueueNode(dev_data, queue);
5129    skip_call |= RetireWorkOnQueue(dev_data, pQueue, pQueue->seq + pQueue->submissions.size());
5130    lock.unlock();
5131    if (skip_call)
5132        return VK_ERROR_VALIDATION_FAILED_EXT;
5133    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
5134    return result;
5135}
5136
5137VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
5138    bool skip_call = false;
5139    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5140    std::unique_lock<std::mutex> lock(global_lock);
5141    for (auto & queue : dev_data->queueMap) {
5142        skip_call |= RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5143    }
5144    lock.unlock();
5145    if (skip_call)
5146        return VK_ERROR_VALIDATION_FAILED_EXT;
5147    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5148    return result;
5149}
5150
5151VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5152    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5153    bool skip_call = false;
5154    std::unique_lock<std::mutex> lock(global_lock);
5155    auto fence_pair = dev_data->fenceMap.find(fence);
5156    if (fence_pair != dev_data->fenceMap.end()) {
5157        if (fence_pair->second.state == FENCE_INFLIGHT) {
5158            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5159                                 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
5160                                 (uint64_t)(fence));
5161        }
5162        dev_data->fenceMap.erase(fence_pair);
5163    }
5164    lock.unlock();
5165
5166    if (!skip_call)
5167        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5168}
5169
5170VKAPI_ATTR void VKAPI_CALL
5171DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5172    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5173
5174    std::unique_lock<std::mutex> lock(global_lock);
5175    auto item = dev_data->semaphoreMap.find(semaphore);
5176    if (item != dev_data->semaphoreMap.end()) {
5177        if (item->second.in_use.load()) {
5178            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5179                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5180                    "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5181        }
5182        dev_data->semaphoreMap.erase(semaphore);
5183    }
5184    lock.unlock();
5185    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5186}
5187
5188VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5189    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5190    bool skip_call = false;
5191    std::unique_lock<std::mutex> lock(global_lock);
5192    auto event_node = getEventNode(dev_data, event);
5193    if (event_node) {
5194        if (event_node->in_use.load()) {
5195            skip_call |= log_msg(
5196                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
5197                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5198                "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5199        }
5200        // Any bound cmd buffers are now invalid
5201        invalidateCommandBuffers(event_node->cb_bindings,
5202                                 {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT});
5203        dev_data->eventMap.erase(event);
5204    }
5205    lock.unlock();
5206    if (!skip_call)
5207        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5208}
5209
5210VKAPI_ATTR void VKAPI_CALL
5211DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5212    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5213    // TODO : Add detection for an in-flight queryPool
5214    std::unique_lock<std::mutex> lock(global_lock);
5215    auto qp_node = getQueryPoolNode(dev_data, queryPool);
5216    if (qp_node) {
5217        // Any bound cmd buffers are now invalid
5218        invalidateCommandBuffers(qp_node->cb_bindings,
5219                                 {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT});
5220        dev_data->queryPoolMap.erase(queryPool);
5221    }
5222    lock.unlock();
5223    dev_data->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5224}
5225
5226VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5227                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5228                                                   VkQueryResultFlags flags) {
5229    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5230    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5231    std::unique_lock<std::mutex> lock(global_lock);
5232    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5233        auto pCB = getCBNode(dev_data, cmdBuffer);
5234        for (auto queryStatePair : pCB->queryToStateMap) {
5235            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5236        }
5237    }
5238    bool skip_call = false;
5239    for (uint32_t i = 0; i < queryCount; ++i) {
5240        QueryObject query = {queryPool, firstQuery + i};
5241        auto queryElement = queriesInFlight.find(query);
5242        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5243        { // Evaluate the else-if chain below even when the query is missing from queryToStateMap, so the uninitialized case can be reported
5244            // Available and in flight
5245            if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5246                queryToStateElement->second) {
5247                for (auto cmdBuffer : queryElement->second) {
5248                    auto pCB = getCBNode(dev_data, cmdBuffer);
5249                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5250                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5251                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5252                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5253                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5254                                             (uint64_t)(queryPool), firstQuery + i);
5255                    } else {
5256                        for (auto event : queryEventElement->second) {
5257                            dev_data->eventMap[event].needsSignaled = true;
5258                        }
5259                    }
5260                }
5261                // Unavailable and in flight
5262            } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5263                       !queryToStateElement->second) {
5264                // TODO : Can there be the same query in use by multiple command buffers in flight?
5265                bool make_available = false;
5266                for (auto cmdBuffer : queryElement->second) {
5267                    auto pCB = getCBNode(dev_data, cmdBuffer);
5268                    make_available |= pCB->queryToStateMap[query];
5269                }
5270                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5271                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5272                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5273                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5274                                         (uint64_t)(queryPool), firstQuery + i);
5275                }
5276                // Unavailable
5277            } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5278                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5279                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5280                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5281                                     (uint64_t)(queryPool), firstQuery + i);
5282                // Uninitialized
5283            } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5284                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5285                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5286                                     "Cannot get query results on queryPool 0x%" PRIx64
5287                                     " with index %d as data has not been collected for this index.",
5288                                     (uint64_t)(queryPool), firstQuery + i);
5289            }
5290        }
5291    }
5292    lock.unlock();
5293    if (skip_call)
5294        return VK_ERROR_VALIDATION_FAILED_EXT;
5295    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5296                                                                flags);
5297}
5298
5299static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5300    bool skip_call = false;
5301    auto buffer_node = getBufferNode(my_data, buffer);
5302    if (!buffer_node) {
5303        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5304                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5305                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5306    } else {
5307        if (buffer_node->in_use.load()) {
5308            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5309                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5310                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5311        }
5312    }
5313    return skip_call;
5314}
5315
5316// Return true if given ranges intersect, else false
5317// Prereq : For both ranges, range->end - range->start > 0. A violation would already have
5318//  produced an error, so it is not re-checked here
5319// When one range is linear and the other is not, the comparison is padded to bufferImageGranularity
5320// In that padded case, if an alias is encountered a validation error is reported and *skip_call
5321//  may be set by the callback function, so the caller should merge in *skip_call when padding is possible.
5322static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
5323    *skip_call = false;
5324    auto r1_start = range1->start;
5325    auto r1_end = range1->end;
5326    auto r2_start = range2->start;
5327    auto r2_end = range2->end;
5328    VkDeviceSize pad_align = 1;
5329    if (range1->linear != range2->linear) {
5330        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5331    }
5332    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
5333        return false;
5334    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
5335        return false;
5336
5337    if (range1->linear != range2->linear) {
5338        // In linear vs. non-linear case, it's an error to alias
5339        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5340        const char *r1_type_str = range1->image ? "image" : "buffer";
5341        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5342        const char *r2_type_str = range2->image ? "image" : "buffer";
5343        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5344        *skip_call |=
5345            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING,
5346                    "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5347                           " which is in violation of the Buffer-Image Granularity section of the Vulkan specification.",
5348                    r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5349    }
5350    // Ranges intersect
5351    return true;
5352}
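// Worked example of the padded comparison above: with bufferImageGranularity == 0x400,
// a linear buffer ending at 0x3ff and a non-linear image starting at 0x400 mask to
// pages 0x0 and 0x400 respectively, so they do not intersect; had the image started
// at 0x3c0 both would mask to page 0x0 and the aliasing error would be reported.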
5353// Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
5354static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
5355    // Create a local MEMORY_RANGE struct to wrap offset/size
5356    MEMORY_RANGE range_wrap;
5357    // Synch linear with range1 to avoid padding and potential validation error case
5358    range_wrap.linear = range1->linear;
5359    range_wrap.start = offset;
5360    range_wrap.end = end;
5361    bool tmp_bool;
5362    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
5363}
5364// For given mem_info, mark valid all bound ranges that intersect the [offset, end] range
5365// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5366static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5367    bool tmp_bool = false;
5368    MEMORY_RANGE map_range;
5369    map_range.linear = true;
5370    map_range.start = offset;
5371    map_range.end = end;
5372    for (auto &handle_range_pair : mem_info->bound_ranges) {
5373        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
5374            // TODO : WARN here if tmp_bool true?
5375            handle_range_pair.second.valid = true;
5376        }
5377    }
5378}
5379// Object with given handle is being bound to memory w/ given mem_info struct.
5380//  Track the newly bound memory range with given memoryOffset
5381//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
5382//  and non-linear range incorrectly overlap.
5383// Return true if an error is flagged and the user callback returns "true", otherwise false
5384// is_image indicates an image object, otherwise handle is for a buffer
5385// is_linear indicates a buffer or linear image
5386static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5387                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5388    bool skip_call = false;
5389    MEMORY_RANGE range;
5390
5391    range.image = is_image;
5392    range.handle = handle;
5393    range.linear = is_linear;
5394    range.valid = mem_info->global_valid;
5395    range.memory = mem_info->mem;
5396    range.start = memoryOffset;
5397    range.size = memRequirements.size;
5398    range.end = memoryOffset + memRequirements.size - 1;
5399    range.aliases.clear();
5400    // Update memory aliasing
5401    // Save aliased ranges so we can copy them into the final map entry below. This can't be done inside the loop because the
5402    // final pointer doesn't exist yet; inserting into the map before the loop would also make the loop compare the new range against itself
5403    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5404    for (auto &obj_range_pair : mem_info->bound_ranges) {
5405        auto check_range = &obj_range_pair.second;
5406        bool intersection_error = false;
5407        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
5408            skip_call |= intersection_error;
5409            range.aliases.insert(check_range);
5410            tmp_alias_ranges.insert(check_range);
5411        }
5412    }
5413    mem_info->bound_ranges[handle] = std::move(range);
5414    for (auto tmp_range : tmp_alias_ranges) {
5415        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5416    }
5417    if (is_image)
5418        mem_info->bound_images.insert(handle);
5419    else
5420        mem_info->bound_buffers.insert(handle);
5421
5422    return skip_call;
5423}
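
// Usage sketch (hypothetical handles and sizes): binding two linear buffers to
// the same VkDeviceMemory with overlapping ranges, e.g. buffer A at
// [0x0, 0xFFF] and buffer B at [0x800, 0x17FF], is legal, so no error is
// flagged; instead each MEMORY_RANGE's 'aliases' set ends up holding a pointer
// to the other range so the layer can later reason about the aliased state.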
5424
5425static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5426                                   VkMemoryRequirements mem_reqs, bool is_linear) {
5427    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5428}
5429
5430static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5431                                    VkMemoryRequirements mem_reqs) {
5432    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5433}
5434
5435// Remove the MEMORY_RANGE struct for the given handle from bound_ranges of mem_info
5436//  is_image indicates if handle is for an image or a buffer
5437//  This function also removes the handle from the appropriate bound_images or
5438//  bound_buffers set and cleans up any aliases of the range being removed.
5439static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5440    auto erase_range = &mem_info->bound_ranges[handle];
5441    for (auto alias_range : erase_range->aliases) {
5442        alias_range->aliases.erase(erase_range);
5443    }
5444    erase_range->aliases.clear();
5445    mem_info->bound_ranges.erase(handle);
5446    if (is_image)
5447        mem_info->bound_images.erase(handle);
5448    else
5449        mem_info->bound_buffers.erase(handle);
5450}
5451
5452static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5453
5454static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5455
5456VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5457                                         const VkAllocationCallbacks *pAllocator) {
5458    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5459    std::unique_lock<std::mutex> lock(global_lock);
5460    if (!validateIdleBuffer(dev_data, buffer)) {
5461        // Clean up memory binding and range information for buffer
5462        auto buff_node = getBufferNode(dev_data, buffer);
5463        if (buff_node) {
5464            // Any bound cmd buffers are now invalid
5465            invalidateCommandBuffers(buff_node->cb_bindings,
5466                                     {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
5467            auto mem_info = getMemObjInfo(dev_data, buff_node->mem);
5468            if (mem_info) {
5469                RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
5470            }
5471            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5472            dev_data->bufferMap.erase(buff_node->buffer);
5473        }
5474        lock.unlock();
5475        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5476    }
5477}
5478
5479VKAPI_ATTR void VKAPI_CALL
5480DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5481    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5482
5483    std::unique_lock<std::mutex> lock(global_lock);
5484    auto item = dev_data->bufferViewMap.find(bufferView);
5485    if (item != dev_data->bufferViewMap.end()) {
5486        dev_data->bufferViewMap.erase(item);
5487    }
5488    lock.unlock();
5489    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5490}
5491
5492VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5493    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5494
5495    std::unique_lock<std::mutex> lock(global_lock);
5496    auto img_node = getImageNode(dev_data, image);
5497    if (img_node) {
5498        // Any bound cmd buffers are now invalid
5499        invalidateCommandBuffers(img_node->cb_bindings,
5500                                 {reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
5501        // Clean up memory mapping, bindings and range references for image
5502        auto mem_info = getMemObjInfo(dev_data, img_node->mem);
5503        if (mem_info) {
5504            RemoveImageMemoryRange(reinterpret_cast<uint64_t &>(image), mem_info);
5505            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5506        }
5507        // Remove image from imageMap
5508        dev_data->imageMap.erase(img_node->image);
5509    }
5510    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5511    if (subEntry != dev_data->imageSubresourceMap.end()) {
5512        for (const auto& pair : subEntry->second) {
5513            dev_data->imageLayoutMap.erase(pair);
5514        }
5515        dev_data->imageSubresourceMap.erase(subEntry);
5516    }
5517    lock.unlock();
5518    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5519}
5520
5521static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
5522                                  const char *funcName) {
5523    bool skip_call = false;
5524    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5525        skip_call = log_msg(
5526            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5527            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
5528            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the "
5529            "memoryTypeIndex (0x%X) of this memory object 0x%" PRIx64 ".",
5530            funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
5531    }
5532    return skip_call;
5533}
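
// Worked example (hypothetical values): if this memory object was allocated
// with memoryTypeIndex = 2, then (1 << 2) == 0x4. A resource reporting
// memoryTypeBits = 0x9 (types 0 and 3 acceptable) yields 0x4 & 0x9 == 0 and
// triggers the error above, while memoryTypeBits = 0x6 would be compatible.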
5534
5535VKAPI_ATTR VkResult VKAPI_CALL
5536BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5537    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5538    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5539    std::unique_lock<std::mutex> lock(global_lock);
5540    // Track objects tied to memory
5541    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5542    bool skip_call = set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5543    auto buffer_node = getBufferNode(dev_data, buffer);
5544    if (buffer_node) {
5545        VkMemoryRequirements memRequirements;
5546        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5547        buffer_node->mem = mem;
5548        buffer_node->memOffset = memoryOffset;
5549        buffer_node->memSize = memRequirements.size;
5550
5551        // Track and validate bound memory range information
5552        auto mem_info = getMemObjInfo(dev_data, mem);
5553        if (mem_info) {
5554            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, memRequirements);
5555            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
5556        }
5557
5558        // Validate memory requirements alignment
5559        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5560            skip_call |=
5561                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5562                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5563                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5564                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5565                        ", returned from a call to vkGetBufferMemoryRequirements with this buffer.",
5566                        memoryOffset, memRequirements.alignment);
5567        }
5568
5569        // Validate device limits alignments
5570        static const VkBufferUsageFlagBits usage_list[3] = {
5571            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5572            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
5573            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5574        static const char *memory_type[3] = {"texel",
5575                                             "uniform",
5576                                             "storage"};
5577        static const char *offset_name[3] = {
5578            "minTexelBufferOffsetAlignment",
5579            "minUniformBufferOffsetAlignment",
5580            "minStorageBufferOffsetAlignment"
5581        };
5582
5583        // These limits must stay in sync (by index) with usage_list and offset_name above
5584        const VkDeviceSize offset_requirement[3] = {
5585            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5586            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5587            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
5588        };
5589        VkBufferUsageFlags usage = buffer_node->createInfo.usage;
5590
5591        for (int i = 0; i < 3; i++) {
5592            if (usage & usage_list[i]) {
5593                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
5594                    skip_call |=
5595                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5596                                0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5597                                "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5598                                "device limit %s 0x%" PRIxLEAST64,
5599                                memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
5600                }
5601            }
5602        }
5603    }
5604    print_mem_list(dev_data);
5605    lock.unlock();
5606    if (!skip_call) {
5607        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5608    }
5609    return result;
5610}
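
// Worked example (hypothetical values): if vkGetBufferMemoryRequirements
// reports alignment = 0x100, then memoryOffset = 0x104 fails the modulo check
// above (0x104 % 0x100 == 4) while memoryOffset = 0x200 passes; the same test
// is then repeated against each applicable min*BufferOffsetAlignment device
// limit for the usages the buffer was created with.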
5611
5612VKAPI_ATTR void VKAPI_CALL
5613GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5614    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5615    // TODO : What to track here?
5616    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5617    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5618}
5619
5620VKAPI_ATTR void VKAPI_CALL
5621GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5622    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5623    // TODO : What to track here?
5624    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5625    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5626}
5627
5628VKAPI_ATTR void VKAPI_CALL
5629DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5630    // TODO : Clean up any internal data structures using this obj.
5631    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5632        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5633}
5634
5635VKAPI_ATTR void VKAPI_CALL
5636DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5637    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5638
5639    std::unique_lock<std::mutex> lock(global_lock);
5640    my_data->shaderModuleMap.erase(shaderModule);
5641    lock.unlock();
5642
5643    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5644}
5645
5646VKAPI_ATTR void VKAPI_CALL
5647DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5648    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5649    // TODO : Add detection for in-flight pipeline
5650    std::unique_lock<std::mutex> lock(global_lock);
5651    auto pipe_node = getPipeline(dev_data, pipeline);
5652    if (pipe_node) {
5653        // Any bound cmd buffers are now invalid
5654        invalidateCommandBuffers(pipe_node->cb_bindings,
5655                                 {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT});
5656        dev_data->pipelineMap.erase(pipeline);
5657    }
5658    lock.unlock();
5659    dev_data->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5660}
5661
5662VKAPI_ATTR void VKAPI_CALL
5663DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5664    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5665    std::unique_lock<std::mutex> lock(global_lock);
5666    dev_data->pipelineLayoutMap.erase(pipelineLayout);
5667    lock.unlock();
5668
5669    dev_data->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5670}
5671
5672VKAPI_ATTR void VKAPI_CALL
5673DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5674    // TODO : Clean up any internal data structures using this obj.
5675    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
5676}
5677
5678VKAPI_ATTR void VKAPI_CALL
5679DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5680    // TODO : Clean up any internal data structures using this obj.
5681    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5682        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5683}
5684
5685VKAPI_ATTR void VKAPI_CALL
5686DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5687    // TODO : Clean up any internal data structures using this obj.
5688    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5689        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
5690}
5691// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5692//  A secondary command buffer is only treated as in use if its primary is also in-flight;
5693//  a secondary whose primary is not in-flight does not generate an error here
5694// This function is only valid at a point when cmdBuffer is being reset or freed
5695static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5696    bool skip_call = false;
5697    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5698        // Primary CB or secondary where primary is also in-flight is an error
5699        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5700            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5701            skip_call |= log_msg(
5702                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5703                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5704                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
5705                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5706        }
5707    }
5708    return skip_call;
5709}
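
// Example scenario (hypothetical): attempting to free a secondary command
// buffer whose primary is still executing reports the "in use" error above;
// once the primary completes and leaves globalInFlightCmdBuffers, the same
// free is allowed to proceed.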
5710
5711// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5712static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
5713    bool skip_call = false;
5714    for (auto cmd_buffer : pPool->commandBuffers) {
5715        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5716            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5717        }
5718    }
5719    return skip_call;
5720}
5721
5722static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
5723    for (auto cmd_buffer : pPool->commandBuffers) {
5724        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5725    }
5726}
5727
5728VKAPI_ATTR void VKAPI_CALL
5729FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5730    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5731    bool skip_call = false;
5732    std::unique_lock<std::mutex> lock(global_lock);
5733
5734    for (uint32_t i = 0; i < commandBufferCount; i++) {
5735        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
5736        // Verify that the command buffer is not in-flight before it is freed
5737        if (cb_node) {
5738            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free");
5739        }
5740    }
5741
5742    if (skip_call)
5743        return;
5744
5745    auto pPool = getCommandPoolNode(dev_data, commandPool);
5746    for (uint32_t i = 0; i < commandBufferCount; i++) {
5747        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
5748        // Delete CB information structure, and remove from commandBufferMap
5749        if (cb_node) {
5750            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5751            // reset prior to delete for data clean-up
5752            resetCB(dev_data, cb_node->commandBuffer);
5753            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
5754            delete cb_node;
5755        }
5756
5757        // Remove commandBuffer reference from commandPoolMap
5758        pPool->commandBuffers.remove(pCommandBuffers[i]);
5759    }
5760    printCBList(dev_data);
5761    lock.unlock();
5762
5763    dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5764}
5765
5766VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5767                                                 const VkAllocationCallbacks *pAllocator,
5768                                                 VkCommandPool *pCommandPool) {
5769    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5770
5771    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5772
5773    if (VK_SUCCESS == result) {
5774        std::lock_guard<std::mutex> lock(global_lock);
5775        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5776        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5777    }
5778    return result;
5779}
5780
5781VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5782                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5784    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5785    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5786    if (result == VK_SUCCESS) {
5787        std::lock_guard<std::mutex> lock(global_lock);
5788        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
5789    }
5790    return result;
5791}
5792
5793// Destroy commandPool along with all of the commandBuffers allocated from that pool
5794VKAPI_ATTR void VKAPI_CALL
5795DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5796    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5797    bool skip_call = false;
5798    std::unique_lock<std::mutex> lock(global_lock);
5799    // Verify that command buffers in pool are complete (not in-flight)
5800    auto pPool = getCommandPoolNode(dev_data, commandPool);
5801    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");
5802
5803    if (skip_call)
5804        return;
5805    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
5806    clearCommandBuffersInFlight(dev_data, pPool);
5807    for (auto cb : pPool->commandBuffers) {
5808        clear_cmd_buf_and_mem_references(dev_data, cb);
5809        auto cb_node = getCBNode(dev_data, cb);
5810        // Remove references to this cb_node prior to delete
5811        // TODO : Need better solution here, resetCB?
5812        for (auto obj : cb_node->object_bindings) {
5813            removeCommandBufferBinding(dev_data, &obj, cb_node);
5814        }
5815        for (auto framebuffer : cb_node->framebuffers) {
5816            auto fb_node = getFramebuffer(dev_data, framebuffer);
5817            if (fb_node)
5818                fb_node->cb_bindings.erase(cb_node);
5819        }
5820        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
5821        delete cb_node;                       // delete CB info structure
5822    }
5823    dev_data->commandPoolMap.erase(commandPool);
5824    lock.unlock();
5825
5826    dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
5827}
5828
5829VKAPI_ATTR VkResult VKAPI_CALL
5830ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5831    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5832    bool skip_call = false;
5833
5834    std::unique_lock<std::mutex> lock(global_lock);
5835    auto pPool = getCommandPoolNode(dev_data, commandPool);
5836    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
5837    lock.unlock();
5838
5839    if (skip_call)
5840        return VK_ERROR_VALIDATION_FAILED_EXT;
5841
5842    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
5843
5844    // Reset all of the CBs allocated from this pool
5845    if (VK_SUCCESS == result) {
5846        lock.lock();
5847        clearCommandBuffersInFlight(dev_data, pPool);
5848        for (auto cmdBuffer : pPool->commandBuffers) {
5849            resetCB(dev_data, cmdBuffer);
5850        }
5851        lock.unlock();
5852    }
5853    return result;
5854}
5855
5856VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5857    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5858    bool skip_call = false;
5859    std::unique_lock<std::mutex> lock(global_lock);
5860    for (uint32_t i = 0; i < fenceCount; ++i) {
5861        auto pFence = getFenceNode(dev_data, pFences[i]);
5862        if (pFence && pFence->state == FENCE_INFLIGHT) {
5863            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5864                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5865                                 "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
5866        }
5867    }
5868    lock.unlock();
5869
5870    if (skip_call)
5871        return VK_ERROR_VALIDATION_FAILED_EXT;
5872
5873    VkResult result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
5874
5875    if (result == VK_SUCCESS) {
5876        lock.lock();
5877        for (uint32_t i = 0; i < fenceCount; ++i) {
5878            auto pFence = getFenceNode(dev_data, pFences[i]);
5879            if (pFence) {
5880                pFence->state = FENCE_UNSIGNALED;
5881            }
5882        }
5883        lock.unlock();
5884    }
5885
5886    return result;
5887}
5888
5889// For given cb_nodes, invalidate them and track object causing invalidation
5890void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
5891    for (auto cb_node : cb_nodes) {
5892        cb_node->state = CB_INVALID;
5893        cb_node->broken_bindings.push_back(obj);
5894    }
5895}
5896
5897VKAPI_ATTR void VKAPI_CALL
5898DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5899    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5900    std::unique_lock<std::mutex> lock(global_lock);
5901    auto fb_node = getFramebuffer(dev_data, framebuffer);
5902    if (fb_node) {
5903        invalidateCommandBuffers(fb_node->cb_bindings,
5904                                 {reinterpret_cast<uint64_t &>(fb_node->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT});
5905        dev_data->frameBufferMap.erase(fb_node->framebuffer);
5906    }
5907    lock.unlock();
5908    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
5909}
5910
5911VKAPI_ATTR void VKAPI_CALL
5912DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
5913    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5914    std::unique_lock<std::mutex> lock(global_lock);
5915    dev_data->renderPassMap.erase(renderPass);
5916    // TODO: leaking all the guts of the renderpass node here!
5917    lock.unlock();
5918    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
5919}
5920
5921VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
5922                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
5923    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5924
5925    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
5926
5927    if (VK_SUCCESS == result) {
5928        std::lock_guard<std::mutex> lock(global_lock);
5929        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
5930        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
5931    }
5932    return result;
5933}
5934
5935static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
5936    bool skip_call = false;
5937    BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer);
5938    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
5939    if (buf_node) {
5940        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buf_node, "vkCreateBufferView()");
5941        // In order to create a valid buffer view, the buffer must have been created with at least one of the
5942        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
5943        skip_call |= ValidateBufferUsageFlags(dev_data, buf_node,
5944                                              VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
5945                                              false, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
5946    }
5947    return skip_call;
5948}
5949
5950VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
5951                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
5952    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5953    std::unique_lock<std::mutex> lock(global_lock);
5954    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
5955    lock.unlock();
5956    if (skip_call)
5957        return VK_ERROR_VALIDATION_FAILED_EXT;
5958    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
5959    if (VK_SUCCESS == result) {
5960        lock.lock();
5961        dev_data->bufferViewMap[*pView] = unique_ptr<VkBufferViewCreateInfo>(new VkBufferViewCreateInfo(*pCreateInfo));
5962        lock.unlock();
5963    }
5964    return result;
5965}
5966
5967VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
5968                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
5969    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5970
5971    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
5972
5973    if (VK_SUCCESS == result) {
5974        std::lock_guard<std::mutex> lock(global_lock);
5975        IMAGE_LAYOUT_NODE image_node;
5976        image_node.layout = pCreateInfo->initialLayout;
5977        image_node.format = pCreateInfo->format;
5978        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(*pImage, pCreateInfo))));
5979        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
5980        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
5981        dev_data->imageLayoutMap[subpair] = image_node;
5982    }
5983    return result;
5984}
5985
5986static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
5987    /* expects global_lock to be held by caller */
5988
5989    auto image_node = getImageNode(dev_data, image);
5990    if (image_node) {
5991        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
5992         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
5993         * the actual values.
5994         */
5995        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
5996            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
5997        }
5998
5999        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6000            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
6001        }
6002    }
6003}
6004
6005// Return the correct layer/level counts if the caller used the special
6006// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6007static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6008                                         VkImage image) {
6009    /* expects global_lock to be held by caller */
6010
6011    *levels = range.levelCount;
6012    *layers = range.layerCount;
6013    auto image_node = getImageNode(dev_data, image);
6014    if (image_node) {
6015        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6016            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
6017        }
6018        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6019            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
6020        }
6021    }
6022}
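
// Example (hypothetical image): for an image created with mipLevels = 10 and
// arrayLayers = 6, the range { baseMipLevel = 3, levelCount =
// VK_REMAINING_MIP_LEVELS, baseArrayLayer = 2, layerCount =
// VK_REMAINING_ARRAY_LAYERS } resolves to levelCount = 7 and layerCount = 4.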
6023
6024static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo) {
6025    bool skip_call = false;
6026    IMAGE_NODE *image_node = getImageNode(dev_data, pCreateInfo->image);
6027    if (image_node) {
6028        skip_call |= ValidateImageUsageFlags(dev_data, image_node,
6029                                             VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
6030                                                 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6031                                             false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
6032        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
6033        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_node, "vkCreateImageView()");
6034    }
6035    return skip_call;
6036}
6037
6038static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo, VkImageView *pView) {
6039    dev_data->imageViewMap[*pView] = unique_ptr<VkImageViewCreateInfo>(new VkImageViewCreateInfo(*pCreateInfo));
6040    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[*pView].get()->subresourceRange, pCreateInfo->image);
6041}
6042
6043VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6044                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6045    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6046    std::unique_lock<std::mutex> lock(global_lock);
6047    bool skip_call = PreCallValidateCreateImageView(dev_data, pCreateInfo);
6048    lock.unlock();
6049    if (skip_call)
6050        return VK_ERROR_VALIDATION_FAILED_EXT;
6051    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6052    if (VK_SUCCESS == result) {
6053        lock.lock();
6054        PostCallRecordCreateImageView(dev_data, pCreateInfo, pView);
6055        lock.unlock();
6056    }
6057
6058    return result;
6059}
6060
6061VKAPI_ATTR VkResult VKAPI_CALL
6062CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6063    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6064    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6065    if (VK_SUCCESS == result) {
6066        std::lock_guard<std::mutex> lock(global_lock);
6067        auto &fence_node = dev_data->fenceMap[*pFence];
6068        fence_node.fence = *pFence;
6069        fence_node.createInfo = *pCreateInfo;
6070        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6071    }
6072    return result;
6073}
6074
6075// TODO handle pipeline caches
6076VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6077                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6078    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6079    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6080    return result;
6081}
6082
6083VKAPI_ATTR void VKAPI_CALL
6084DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6085    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6086    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6087}
6088
6089VKAPI_ATTR VkResult VKAPI_CALL
6090GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6091    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6092    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6093    return result;
6094}
6095
6096VKAPI_ATTR VkResult VKAPI_CALL
6097MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6098    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6099    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6100    return result;
6101}
6102
6103// utility function to set collective state for pipeline
6104void set_pipeline_state(PIPELINE_NODE *pPipe) {
6105    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6106    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6107        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6108            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6109                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6110                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6111                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6112                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6113                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6114                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6115                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6116                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6117                    pPipe->blendConstantsEnabled = true;
6118                }
6119            }
6120        }
6121    }
6122}
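
// Example (hypothetical pipeline state): an attachment with blendEnable ==
// VK_TRUE and srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR falls inside
// the CONSTANT_COLOR..ONE_MINUS_CONSTANT_ALPHA window tested above, so
// blendConstantsEnabled is set, letting later draw-time validation check that
// blend constants were actually bound.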
6123
6124VKAPI_ATTR VkResult VKAPI_CALL
6125CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6126                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6127                        VkPipeline *pPipelines) {
6128    VkResult result = VK_SUCCESS;
6129    // TODO What to do with pipelineCache?
6130    // The order of operations here is a little convoluted but gets the job done
6131    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6132    //  2. Create state is then validated (which uses flags setup during shadowing)
6133    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6134    bool skip_call = false;
6135    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6136    vector<PIPELINE_NODE *> pPipeNode(count);
6137    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6138
6139    uint32_t i = 0;
6140    std::unique_lock<std::mutex> lock(global_lock);
6141
6142    for (i = 0; i < count; i++) {
6143        pPipeNode[i] = new PIPELINE_NODE;
6144        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
6145        pPipeNode[i]->render_pass_ci.initialize(getRenderPass(dev_data, pCreateInfos[i].renderPass)->pCreateInfo);
6146        pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6147
6148        skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6149    }
6150
6151    if (!skip_call) {
6152        lock.unlock();
6153        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6154                                                                          pPipelines);
6155        lock.lock();
6156        for (i = 0; i < count; i++) {
6157            pPipeNode[i]->pipeline = pPipelines[i];
6158            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6159        }
6160        lock.unlock();
6161    } else {
6162        for (i = 0; i < count; i++) {
6163            delete pPipeNode[i];
6164        }
6165        lock.unlock();
6166        return VK_ERROR_VALIDATION_FAILED_EXT;
6167    }
6168    return result;
6169}
6170
6171VKAPI_ATTR VkResult VKAPI_CALL
6172CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6173                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6174                       VkPipeline *pPipelines) {
6175    VkResult result = VK_SUCCESS;
6176    bool skip_call = false;
6177
6178    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6179    vector<PIPELINE_NODE *> pPipeNode(count);
6180    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6181
6182    uint32_t i = 0;
6183    std::unique_lock<std::mutex> lock(global_lock);
6184    for (i = 0; i < count; i++) {
6185        // TODO: Verify compute stage bits
6186
6187        // Create and initialize internal tracking data structure
6188        pPipeNode[i] = new PIPELINE_NODE;
6189        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
6190        pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6191        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6192
6193        // TODO: Add Compute Pipeline Verification
6194        skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i], &dev_data->phys_dev_properties.features,
6195                                                dev_data->shaderModuleMap);
6196        // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6197    }
6198
6199    if (!skip_call) {
6200        lock.unlock();
6201        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6202                                                                         pPipelines);
6203        lock.lock();
6204        for (i = 0; i < count; i++) {
6205            pPipeNode[i]->pipeline = pPipelines[i];
6206            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6207        }
6208        lock.unlock();
6209    } else {
6210        for (i = 0; i < count; i++) {
6211            // Clean up any locally allocated data structures
6212            delete pPipeNode[i];
6213        }
6214        lock.unlock();
6215        return VK_ERROR_VALIDATION_FAILED_EXT;
6216    }
6217    return result;
6218}
6219
6220VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6221                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6222    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6223    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6224    if (VK_SUCCESS == result) {
6225        std::lock_guard<std::mutex> lock(global_lock);
6226        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6227    }
6228    return result;
6229}
6230
6231VKAPI_ATTR VkResult VKAPI_CALL
6232CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6233                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6234    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6235    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6236    if (VK_SUCCESS == result) {
6237        // TODOSC : Capture layout bindings set
6238        std::lock_guard<std::mutex> lock(global_lock);
6239        dev_data->descriptorSetLayoutMap[*pSetLayout] =
6240            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
6241    }
6242    return result;
6243}
6244
6245// Used by CreatePipelineLayout and CmdPushConstants.
6246// Note that the index argument is optional and only used by CreatePipelineLayout.
6247static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6248                                      const char *caller_name, uint32_t index = 0) {
6249    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
6250    bool skip_call = false;
6251    // Check that offset + size don't exceed the max.
6252    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
6253    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6254        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6255        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6256            skip_call |=
6257                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6258                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
6259                                                              "exceeds this device's maxPushConstantsSize of %u.",
6260                        caller_name, index, offset, size, maxPushConstantsSize);
6261        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6262            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6263                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6264                                                                       "exceeds this device's maxPushConstantsSize of %u.",
6265                                 caller_name, offset, size, maxPushConstantsSize);
6266        } else {
6267            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6268                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6269        }
6270    }
6271    // size needs to be non-zero and a multiple of 4.
6272    if ((size == 0) || ((size & 0x3) != 0)) {
6273        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6274            skip_call |=
6275                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6276                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6277                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6278                        caller_name, index, size);
6279        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6280            skip_call |=
6281                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6282                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6283                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6284                        caller_name, size);
6285        } else {
6286            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6287                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6288        }
6289    }
6290    // offset needs to be a multiple of 4.
6291    if ((offset & 0x3) != 0) {
6292        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6293            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6294                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6295                                                                       "offset %u. Offset must be a multiple of 4.",
6296                                 caller_name, index, offset);
6297        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6298            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6299                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6300                                                                       "offset %u. Offset must be a multiple of 4.",
6301                                 caller_name, offset);
6302        } else {
6303            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6304                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6305        }
6306    }
6307    return skip_call;
6308}
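
// Worked example (hypothetical limit): with maxPushConstantsSize = 128, the
// range { offset = 120, size = 16 } fails the first check since
// 16 > 128 - 120 even though the offset alone is in bounds; { offset = 4,
// size = 6 } fails the size check (not a multiple of 4); and { offset = 2,
// size = 8 } fails the offset-alignment check.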
6309
6310VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6311                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6312    bool skip_call = false;
6313    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6314    // Push Constant Range checks
6315    uint32_t i = 0;
6316    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6317        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6318                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
6319        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
6320            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6321                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
6322        }
6323    }
6324    // Each range has been validated individually. Now check for overlap between ranges.
6325    if (!skip_call) {
6326        uint32_t i, j;
6327        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6328            for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
6329                const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
6330                const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
6331                const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
6332                const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
6333                if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
6334                    skip_call |=
6335                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6336                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
6337                                                                      "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
6338                                i, minA, maxA, j, minB, maxB);
6339                }
6340            }
6341        }
6342    }
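
    // Worked example (hypothetical ranges): { offset = 0, size = 16 } and
    // { offset = 8, size = 16 } give intervals [0, 16) and [8, 24), and
    // (0 <= 8 && 16 > 8) is true, so the overlap warning above fires; adjacent
    // ranges [0, 16) and [16, 32) do not, since (0 <= 16 && 16 > 16) and
    // (16 <= 0 && 32 > 0) are both false.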
6343
6344    if (skip_call)
6345        return VK_ERROR_VALIDATION_FAILED_EXT;
6346
6347    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6348    if (VK_SUCCESS == result) {
6349        std::lock_guard<std::mutex> lock(global_lock);
6350        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6351        plNode.layout = *pPipelineLayout;
6352        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
6353        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6354            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
6355        }
6356        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
6357        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6358            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
6359        }
6360    }
6361    return result;
6362}
6363
6364VKAPI_ATTR VkResult VKAPI_CALL
6365CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6366                     VkDescriptorPool *pDescriptorPool) {
6367    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6368    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6369    if (VK_SUCCESS == result) {
6370        // Record the new pool in descriptorPoolMap
6371        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6372                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
6373                    (uint64_t)*pDescriptorPool))
6374            return VK_ERROR_VALIDATION_FAILED_EXT;
6375        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6376        if (NULL == pNewNode) {
6377            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6378                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6379                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6380                return VK_ERROR_VALIDATION_FAILED_EXT;
6381        } else {
6382            std::lock_guard<std::mutex> lock(global_lock);
6383            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6384        }
6385    } else {
6386        // TODO : Anything to clean up if pool creation fails?
6387    }
6388    return result;
6389}
6390
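// Reset the descriptor pool down the chain, then clear the layer state for all sets
// allocated from it so stale set handles can no longer be referenced.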
6391VKAPI_ATTR VkResult VKAPI_CALL
6392ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6393    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6394    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6395    if (VK_SUCCESS == result) {
6396        std::lock_guard<std::mutex> lock(global_lock);
6397        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6398    }
6399    return result;
6400}
6401// Ensure the pool contains enough descriptors and descriptor sets to satisfy
6402// an allocation request. Fills common_data with the total number of descriptors of each type required,
6403// as well as DescriptorSetLayout ptrs used for later update.
6404static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6405                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6406    // All state checks for AllocateDescriptorSets are done in a single function
6407    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
6408}
6409// Allocation state was good and the call down the chain was made, so update state based on the newly allocated descriptor sets
6410static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6411                                                 VkDescriptorSet *pDescriptorSets,
6412                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6413    // All the updates are contained in a single cvdescriptorset function
6414    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
6415                                                   &dev_data->setMap, dev_data);
6416}
6417
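// Standard three-phase layer pattern: PreCallValidate under the global lock, call down
// the chain unlocked, then PostCallRecord the new sets under the lock on success.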
6418VKAPI_ATTR VkResult VKAPI_CALL
6419AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6420    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6421    std::unique_lock<std::mutex> lock(global_lock);
6422    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6423    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6424    lock.unlock();
6425
6426    if (skip_call)
6427        return VK_ERROR_VALIDATION_FAILED_EXT;
6428
6429    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6430
6431    if (VK_SUCCESS == result) {
6432        lock.lock();
6433        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6434        lock.unlock();
6435    }
6436    return result;
6437}
6438// Verify state before freeing DescriptorSets
6439static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6440                                              const VkDescriptorSet *descriptor_sets) {
6441    bool skip_call = false;
6442    // First make sure the sets being destroyed are not currently in use
6443    for (uint32_t i = 0; i < count; ++i)
6444        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6445
6446    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
6447    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
6448        // Can't Free from a NON_FREE pool
6449        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6450                             reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6451                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6452                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6453    }
6454    return skip_call;
6455}
6456// Sets have been removed from the pool so update underlying state
6457static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6458                                             const VkDescriptorSet *descriptor_sets) {
6459    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
6460    // Update available descriptor sets in pool
6461    pool_state->availableSets += count;
6462
6463    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6464    for (uint32_t i = 0; i < count; ++i) {
6465        auto set_state = dev_data->setMap[descriptor_sets[i]];
6466        uint32_t type_index = 0, descriptor_count = 0;
6467        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6468            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6469            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6470            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6471        }
6472        freeDescriptorSet(dev_data, set_state);
6473        pool_state->sets.erase(set_state);
6474    }
6475}
6476
6477VKAPI_ATTR VkResult VKAPI_CALL
6478FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6479    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6480    // Make sure that no sets being destroyed are in-flight
6481    std::unique_lock<std::mutex> lock(global_lock);
6482    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6483    lock.unlock();
6484
6485    if (skip_call)
6486        return VK_ERROR_VALIDATION_FAILED_EXT;
6487    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6488    if (VK_SUCCESS == result) {
6489        lock.lock();
6490        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6491        lock.unlock();
6492    }
6493    return result;
6494}
6495// TODO : This is a proof-of-concept for the core validation architecture
6496//  Really we'll want to break these functions out into separate files, but
6497//  we're keeping it all together here to prove out the design
6498// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6499static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6500                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6501                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6502    // First thing to do is perform map look-ups.
6503    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
6504    //  so we can't do a single map look-up up-front; the look-ups are done individually in the functions below
6505
6506    // Now make the call(s) that validate state, but don't perform any state updates in this function
6507    // Note that we don't yet have a specific DescriptorSet instance here, so we use a helper function in the
6508    //  cvdescriptorset namespace that parses the params and makes calls into the specific class instances
6509    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6510                                                         descriptorCopyCount, pDescriptorCopies);
6511}
6512// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6513static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6514                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6515                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6516    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6517                                                 pDescriptorCopies);
6518}
6519
6520VKAPI_ATTR void VKAPI_CALL
6521UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6522                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6523    // The only map look-up at this top level is for the device-level layer_data
6524    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6525    std::unique_lock<std::mutex> lock(global_lock);
6526    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6527                                                         pDescriptorCopies);
6528    lock.unlock();
6529    if (!skip_call) {
6530        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6531                                                              pDescriptorCopies);
6532        lock.lock();
6533        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6534        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6535                                           pDescriptorCopies);
6536    }
6537}
6538
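// Allocate command buffers down the chain, then create a GLOBAL_CB_NODE for each new
// handle, registering it both with its command pool and in the global commandBufferMap.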
6539VKAPI_ATTR VkResult VKAPI_CALL
6540AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6541    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6542    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6543    if (VK_SUCCESS == result) {
6544        std::unique_lock<std::mutex> lock(global_lock);
6545        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
6546
6547        if (pPool) {
6548            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6549                // Add command buffer to its command pool's list of command buffers
6550                pPool->commandBuffers.push_back(pCommandBuffer[i]);
6551                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6552                // Add command buffer to map
6553                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6554                resetCB(dev_data, pCommandBuffer[i]);
6555                pCB->createInfo = *pCreateInfo;
6556                pCB->device = device;
6557            }
6558        }
6559        printCBList(dev_data);
6560        lock.unlock();
6561    }
6562    return result;
6563}
6564
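// vkBeginCommandBuffer() validation: flag a begin on an in-flight buffer, check
// secondary command buffer inheritance info (render pass, framebuffer, queries), and
// verify that any implicit reset is permitted by the command pool's create flags.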
6565VKAPI_ATTR VkResult VKAPI_CALL
6566BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6567    bool skip_call = false;
6568    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6569    std::unique_lock<std::mutex> lock(global_lock);
6570    // Validate command buffer level
6571    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6572    if (pCB) {
6573        // Begin implicitly resets the Cmd Buffer, so make sure any fence on it has completed and then clear its memory references
6574        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6575            skip_call |=
6576                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6577                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6578                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6579                        "You must check CB fence before this call.",
6580                        commandBuffer);
6581        }
6582        clear_cmd_buf_and_mem_references(dev_data, pCB);
6583        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6584            // Secondary Command Buffer
6585            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6586            if (!pInfo) {
6587                skip_call |=
6588                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6589                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6590                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6591                            reinterpret_cast<void *>(commandBuffer));
6592            } else {
6593                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6594                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6595                        skip_call |= log_msg(
6596                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6597                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6598                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must specify a valid renderpass parameter.",
6599                            reinterpret_cast<void *>(commandBuffer));
6600                    }
6601                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6602                        skip_call |= log_msg(
6603                            dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6604                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6605                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) may perform better if a "
6606                            "valid framebuffer parameter is specified.",
6607                            reinterpret_cast<void *>(commandBuffer));
6608                    } else {
6609                        string errorString = "";
6610                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
6611                        if (framebuffer) {
6612                            if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
6613                                !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
6614                                                                 getRenderPass(dev_data, pInfo->renderPass)->pCreateInfo,
6615                                                                 errorString)) {
6616                                // renderPass that framebuffer was created with must be compatible with local renderPass
6617                                skip_call |= log_msg(
6618                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6619                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6620                                    __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
6621                                    "vkBeginCommandBuffer(): Secondary Command "
6622                                    "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6623                                    "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6624                                    reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass),
6625                                    reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
6626                                    reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
6627                            }
6628                            // Connect this framebuffer to this cmdBuffer
6629                            framebuffer->cb_bindings.insert(pCB);
6630                        }
6631                    }
6632                }
6633                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6634                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6635                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6636                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6637                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6638                                         __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6639                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6640                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusion queries are disabled or the device does not "
6641                                         "support precise occlusion queries.",
6642                                         reinterpret_cast<void *>(commandBuffer));
6643                }
6644            }
6645            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6646                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
6647                if (renderPass) {
6648                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
6649                        skip_call |= log_msg(
6650                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6651                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6652                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
6653                            "that is less than the number of subpasses (%d).",
6654                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
6655                    }
6656                }
6657            }
6658        }
6659        if (CB_RECORDING == pCB->state) {
6660            skip_call |=
6661                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6662                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6663                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6664                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6665                        (uint64_t)commandBuffer);
6666        } else if (CB_RECORDED == pCB->state || (CB_INVALID == pCB->state && CMD_END == pCB->cmds.back().type)) {
6667            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6668            auto pPool = getCommandPoolNode(dev_data, cmdPool);
6669            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6670                skip_call |=
6671                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6672                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6673                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
6674                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
6675                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6676                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6677            }
6678            resetCB(dev_data, commandBuffer);
6679        }
6680        // Set updated state here in case implicit reset occurs above
6681        pCB->state = CB_RECORDING;
6682        pCB->beginInfo = *pBeginInfo;
6683        if (pCB->beginInfo.pInheritanceInfo) {
6684            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6685            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6686            // If we are a secondary command buffer that is inheriting render pass state, update the items we inherit.
6687            if ((pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6688                (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6689                pCB->activeRenderPass = getRenderPass(dev_data, pCB->beginInfo.pInheritanceInfo->renderPass);
6690                pCB->activeSubpass = pCB->beginInfo.pInheritanceInfo->subpass;
6691                pCB->framebuffers.insert(pCB->beginInfo.pInheritanceInfo->framebuffer);
6692            }
6693        }
6694    } else {
6695        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6696                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6697                             "vkBeginCommandBuffer(): Unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
6698    }
6699    lock.unlock();
6700    if (skip_call) {
6701        return VK_ERROR_VALIDATION_FAILED_EXT;
6702    }
6703    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6704
6705    return result;
6706}
6707
6708VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6709    bool skip_call = false;
6710    VkResult result = VK_SUCCESS;
6711    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6712    std::unique_lock<std::mutex> lock(global_lock);
6713    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6714    if (pCB) {
6715        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6716            // This needs spec clarification to update valid usage, see comments in PR:
6717            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6718            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6719        }
6720        skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6721        for (auto query : pCB->activeQueries) {
6722            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6723                                 DRAWSTATE_INVALID_QUERY, "DS",
6724                                 "Ending command buffer with in-progress query: queryPool 0x%" PRIx64 ", index %d",
6725                                 (uint64_t)(query.pool), query.index);
6726        }
6727    }
6728    if (!skip_call) {
6729        lock.unlock();
6730        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6731        lock.lock();
6732        if (VK_SUCCESS == result) {
6733            pCB->state = CB_RECORDED;
6734            // Reset CB status flags
6735            pCB->status = 0;
6736            printCB(dev_data, commandBuffer);
6737        }
6738    } else {
6739        result = VK_ERROR_VALIDATION_FAILED_EXT;
6740    }
6741    lock.unlock();
6742    return result;
6743}
6744
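// An explicit reset is only valid if the buffer's pool was created with
// VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT and the buffer is not in flight.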
6745VKAPI_ATTR VkResult VKAPI_CALL
6746ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6747    bool skip_call = false;
6748    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6749    std::unique_lock<std::mutex> lock(global_lock);
6750    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6751    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6752    auto pPool = getCommandPoolNode(dev_data, cmdPool);
6753    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6754        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6755                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6756                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6757                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6758                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6759    }
6760    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
6761    lock.unlock();
6762    if (skip_call)
6763        return VK_ERROR_VALIDATION_FAILED_EXT;
6764    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6765    if (VK_SUCCESS == result) {
6766        lock.lock();
6767        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
6768        resetCB(dev_data, commandBuffer);
6769        lock.unlock();
6770    }
6771    return result;
6772}
6773
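// Record a pipeline bind: compute pipelines must not be bound while a render pass is
// active, and the pipeline must exist so its state can be folded into the CB's state.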
6774VKAPI_ATTR void VKAPI_CALL
6775CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6776    bool skip_call = false;
6777    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6778    std::unique_lock<std::mutex> lock(global_lock);
6779    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6780    if (pCB) {
6781        skip_call |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6782        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6783            skip_call |=
6784                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6785                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6786                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
6787                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
6788        }
6789
6790        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6791        if (pPN) {
6792            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6793            set_cb_pso_status(pCB, pPN);
6794            set_pipeline_state(pPN);
6795            addCommandBufferBinding(&pPN->cb_bindings,
6796                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, pCB);
6797        } else {
6798            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6799                                 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6800                                 "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6801        }
6802    }
6803    lock.unlock();
6804    if (!skip_call)
6805        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6806}
6807
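// The CmdSet* entry points below share a pattern: record the command, then set a
// CBSTATUS_* bit so draw-time validation knows that piece of dynamic state was provided.
// viewportMask/scissorMask track which individual slots have been set; for example,
// firstViewport = 1 with viewportCount = 2 yields ((1u << 2) - 1u) << 1 == 0x6,
// marking viewports 1 and 2 as set.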
6808VKAPI_ATTR void VKAPI_CALL
6809CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6810    bool skip_call = false;
6811    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6812    std::unique_lock<std::mutex> lock(global_lock);
6813    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6814    if (pCB) {
6815        skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6816        pCB->status |= CBSTATUS_VIEWPORT_SET;
6817        pCB->viewportMask |= ((1u<<viewportCount) - 1u) << firstViewport;
6818    }
6819    lock.unlock();
6820    if (!skip_call)
6821        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6822}
6823
6824VKAPI_ATTR void VKAPI_CALL
6825CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6826    bool skip_call = false;
6827    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6828    std::unique_lock<std::mutex> lock(global_lock);
6829    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6830    if (pCB) {
6831        skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6832        pCB->status |= CBSTATUS_SCISSOR_SET;
6833        pCB->scissorMask |= ((1u<<scissorCount) - 1u) << firstScissor;
6834    }
6835    lock.unlock();
6836    if (!skip_call)
6837        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6838}
6839
6840VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6841    bool skip_call = false;
6842    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6843    std::unique_lock<std::mutex> lock(global_lock);
6844    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6845    if (pCB) {
6846        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6847        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6848
6849        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6850        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6851            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6852                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6853                                 "vkCmdSetLineWidth() called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
6854                                 "flag. This is undefined behavior, and the new line width may be ignored.");
6855        } else {
6856            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6857        }
6858    }
6859    lock.unlock();
6860    if (!skip_call)
6861        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6862}
6863
6864VKAPI_ATTR void VKAPI_CALL
6865CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6866    bool skip_call = false;
6867    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6868    std::unique_lock<std::mutex> lock(global_lock);
6869    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6870    if (pCB) {
6871        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6872        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6873    }
6874    lock.unlock();
6875    if (!skip_call)
6876        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6877                                                         depthBiasSlopeFactor);
6878}
6879
6880VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6881    bool skip_call = false;
6882    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6883    std::unique_lock<std::mutex> lock(global_lock);
6884    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6885    if (pCB) {
6886        skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6887        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6888    }
6889    lock.unlock();
6890    if (!skip_call)
6891        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6892}
6893
6894VKAPI_ATTR void VKAPI_CALL
6895CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6896    bool skip_call = false;
6897    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6898    std::unique_lock<std::mutex> lock(global_lock);
6899    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6900    if (pCB) {
6901        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6902        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6903    }
6904    lock.unlock();
6905    if (!skip_call)
6906        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6907}
6908
6909VKAPI_ATTR void VKAPI_CALL
6910CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6911    bool skip_call = false;
6912    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6913    std::unique_lock<std::mutex> lock(global_lock);
6914    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6915    if (pCB) {
6916        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6917        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6918    }
6919    lock.unlock();
6920    if (!skip_call)
6921        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6922}
6923
6924VKAPI_ATTR void VKAPI_CALL
6925CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6926    bool skip_call = false;
6927    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6928    std::unique_lock<std::mutex> lock(global_lock);
6929    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6930    if (pCB) {
6931        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6932        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6933    }
6934    lock.unlock();
6935    if (!skip_call)
6936        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6937}
6938
6939VKAPI_ATTR void VKAPI_CALL
6940CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6941    bool skip_call = false;
6942    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6943    std::unique_lock<std::mutex> lock(global_lock);
6944    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6945    if (pCB) {
6946        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6947        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6948    }
6949    lock.unlock();
6950    if (!skip_call)
6951        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6952}
6953
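// vkCmdBindDescriptorSets() validation: each set being bound must exist and be
// layout-compatible with the pipeline layout at its slot, exactly one dynamic offset
// must be supplied per dynamic descriptor, and previously bound sets disturbed by this
// binding are invalidated per the compatibility rules.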
6954VKAPI_ATTR void VKAPI_CALL
6955CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6956                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6957                      const uint32_t *pDynamicOffsets) {
6958    bool skip_call = false;
6959    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6960    std::unique_lock<std::mutex> lock(global_lock);
6961    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6962    if (pCB) {
6963        if (pCB->state == CB_RECORDING) {
6964            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6965            uint32_t totalDynamicDescriptors = 0;
6966            string errorString = "";
6967            uint32_t lastSetIndex = firstSet + setCount - 1;
6968            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6969                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6970                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
6971            }
6972            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6973            auto pipeline_layout = getPipelineLayout(dev_data, layout);
6974            for (uint32_t i = 0; i < setCount; i++) {
6975                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6976                if (pSet) {
6977                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
6978                    pSet->BindCommandBuffer(pCB);
6979                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
6980                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
6981                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6982                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6983                                         DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
6984                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6985                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
6986                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6987                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6988                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6989                                             "DS 0x%" PRIxLEAST64
6990                                             " bound but it was never updated. You may want to either update it or not bind it.",
6991                                             (uint64_t)pDescriptorSets[i]);
6992                    }
6993                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6994                    if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
6995                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6996                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6997                                             DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6998                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
6999                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
7000                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
7001                    }
7002
7003                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
7004
7005                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
7006
7007                    if (setDynamicDescriptorCount) {
7008                        // First make sure we won't overstep bounds of pDynamicOffsets array
7009                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
7010                            skip_call |=
7011                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7012                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7013                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7014                                        "descriptorSet #%u (0x%" PRIxLEAST64
7015                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7016                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7017                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
7018                                        (dynamicOffsetCount - totalDynamicDescriptors));
7019                        } else { // Validate and store dynamic offsets with the set
7020                            // Validate Dynamic Offset Minimums
7021                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7022                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
7023                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7024                                    if (vk_safe_modulo(
7025                                            pDynamicOffsets[cur_dyn_offset],
7026                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7027                                        skip_call |= log_msg(
7028                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7029                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7030                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7031                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7032                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
7033                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7034                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
7035                                    }
7036                                    cur_dyn_offset++;
7037                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7038                                    if (vk_safe_modulo(
7039                                            pDynamicOffsets[cur_dyn_offset],
7040                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7041                                        skip_call |= log_msg(
7042                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7043                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7044                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7045                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7046                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
7047                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7048                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
7049                                    }
7050                                    cur_dyn_offset++;
7051                                }
7052                            }
7053
7054                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
7055                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
7056                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
7057                            // Keep running total of dynamic descriptor count to verify at the end
7058                            totalDynamicDescriptors += setDynamicDescriptorCount;
7059
7060                        }
7061                    }
7062                } else {
7063                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7064                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7065                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
7066                                         (uint64_t)pDescriptorSets[i]);
7067                }
7068                skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7069                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7070                if (firstSet > 0) { // Check set #s below the first bound set
7071                    for (uint32_t i = 0; i < firstSet; ++i) {
7072                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7073                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
7074                                                             pipeline_layout, i, errorString)) {
7075                            skip_call |= log_msg(
7076                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7077                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7078                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7079                                "DescriptorSet 0x%" PRIxLEAST64
7080                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7081                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7082                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7083                        }
7084                    }
7085                }
7086                // Check if newly last bound set invalidates any remaining bound sets
7087                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7088                    if (oldFinalBoundSet &&
7089                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
7090                        auto old_set = oldFinalBoundSet->GetSet();
7091                        skip_call |=
7092                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7093                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
7094                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
7095                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
7096                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7097                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7098                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
7099                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7100                                    lastSetIndex + 1, (uint64_t)layout);
7101                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7102                    }
7103                }
7104            }
7105            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7106            if (totalDynamicDescriptors != dynamicOffsetCount) {
7107                skip_call |=
7108                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7109                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7110                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7111                            "is %u. It should exactly match the number of dynamic descriptors.",
7112                            setCount, totalDynamicDescriptors, dynamicOffsetCount);
7113            }
7114        } else {
7115            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7116        }
7117    }
7118    lock.unlock();
7119    if (!skip_call)
7120        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7121                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7122}
7123
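// Index buffer offsets must be aligned to the index size: 2 bytes for
// VK_INDEX_TYPE_UINT16, 4 bytes for VK_INDEX_TYPE_UINT32.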
7124VKAPI_ATTR void VKAPI_CALL
7125CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7126    bool skip_call = false;
7127    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7128    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7129    std::unique_lock<std::mutex> lock(global_lock);
7130
7131    auto buff_node = getBufferNode(dev_data, buffer);
7132    auto cb_node = getCBNode(dev_data, commandBuffer);
7133    if (cb_node && buff_node) {
7134        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7135        std::function<bool()> function = [=]() {
7136            return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7137        };
7138        cb_node->validate_functions.push_back(function);
7139        skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7140        VkDeviceSize offset_align = 0;
7141        switch (indexType) {
7142        case VK_INDEX_TYPE_UINT16:
7143            offset_align = 2;
7144            break;
7145        case VK_INDEX_TYPE_UINT32:
7146            offset_align = 4;
7147            break;
7148        default:
7149            // ParamChecker should catch a bad enum; we'll also flag the alignment error below if offset_align stays 0
7150            break;
7151        }
7152        if (!offset_align || (offset % offset_align)) {
7153            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7154                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7155                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7156                                 offset, string_VkIndexType(indexType));
7157        }
7158        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7159    } else {
7160        assert(0);
7161    }
7162    lock.unlock();
7163    if (!skip_call)
7164        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7165}
7166
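// Record the buffers bound to slots [firstBinding, firstBinding + bindingCount) in the
// CB's current draw data so per-draw resource usage can be tracked.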
7167void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7168    uint32_t end = firstBinding + bindingCount;
7169    if (pCB->currentDrawData.buffers.size() < end) {
7170        pCB->currentDrawData.buffers.resize(end);
7171    }
7172    for (uint32_t i = 0; i < bindingCount; ++i) {
7173        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7174    }
7175}
7176
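// Each draw appends a snapshot of the currently bound buffers so in-use tracking
// survives later rebinds.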
7177static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7178
7179VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7180                                                uint32_t bindingCount, const VkBuffer *pBuffers,
7181                                                const VkDeviceSize *pOffsets) {
7182    bool skip_call = false;
7183    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7184    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7185    std::unique_lock<std::mutex> lock(global_lock);
7186
7187    auto cb_node = getCBNode(dev_data, commandBuffer);
7188    if (cb_node) {
7189        for (uint32_t i = 0; i < bindingCount; ++i) {
7190            auto buff_node = getBufferNode(dev_data, pBuffers[i]);
7191            assert(buff_node);
7192            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7193            std::function<bool()> function = [=]() {
7194                return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7195            };
7196            cb_node->validate_functions.push_back(function);
7197        }
7198        skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7199        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
7200    } else {
7201        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7202    }
7203    lock.unlock();
7204    if (!skip_call)
7205        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7206}
7207
7208/* expects global_lock to be held by caller */
7209static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7210    bool skip_call = false;
7211
7212    for (auto imageView : pCB->updateImages) {
7213        auto iv_data = getImageViewData(dev_data, imageView);
7214        if (!iv_data)
7215            continue;
7216
7217        auto img_node = getImageNode(dev_data, iv_data->image);
7218        assert(img_node);
7219        std::function<bool()> function = [=]() {
7220            SetImageMemoryValid(dev_data, img_node, true);
7221            return false;
7222        };
7223        pCB->validate_functions.push_back(function);
7224    }
7225    for (auto buffer : pCB->updateBuffers) {
7226        auto buff_node = getBufferNode(dev_data, buffer);
7227        assert(buff_node);
7228        std::function<bool()> function = [=]() {
7229            SetBufferMemoryValid(dev_data, buff_node, true);
7230            return false;
7231        };
7232        pCB->validate_functions.push_back(function);
7233    }
7234    return skip_call;
7235}
7236
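// Draw-time validation: verify bound pipeline, descriptor, and dynamic state, mark any
// storage images/buffers the draw may write as valid, then snapshot the draw's buffers.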
VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        pCB->drawCount[DRAW]++;
        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                          uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
        pCB->drawCount[DRAW_INDEXED]++;
        skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                             "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
                                                        firstInstance);
}

VKAPI_ATTR void VKAPI_CALL
CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndirect()");
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, buff_node, "vkCmdDrawIndirect()");
        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
        cb_node->drawCount[DRAW_INDIRECT]++;
        skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                             "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}

VKAPI_ATTR void VKAPI_CALL
CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndexedIndirect()");
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, buff_node, "vkCmdDrawIndexedIndirect()");
        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
        cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
        skip_call |=
            validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
}

VKAPI_ATTR void VKAPI_CALL
CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDispatchIndirect()");
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, buff_node, "vkCmdDispatchIndirect()");
        skip_call |=
            validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && src_buff_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBuffer()");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyBuffer()");
        // Update bindings between buffers and cmd buffer
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node, "vkCmdCopyBuffer()");
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdCopyBuffer()");
        // Validate that SRC & DST buffers have correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()",
                                              "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()",
                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");

        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()");
    } else {
        // Param_checker will flag errors on invalid objects, just assert here as debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}

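// NOTE: The pair of lambdas queued in CmdCopyBuffer() above illustrates the general transfer
// pattern used throughout this file: the source resource gets a deferred *check* that its memory
// contents are valid at submit time, while the destination resource gets a deferred *update*
// marking its memory valid (since the copy writes it). A sketch of the shape, with placeholder
// src_node/dst_node names:
//
//     cb_node->validate_functions.push_back(
//         [=]() { return ValidateBufferMemoryIsValid(dev_data, src_node, "caller()"); }); // read side
//     cb_node->validate_functions.push_back(
//         [=]() { SetBufferMemoryValid(dev_data, dst_node, true); return false; });       // write side
//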
static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, srcImage, sub, node)) {
            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                                                                        "when its current layout is %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
            auto image_node = getImageNode(dev_data, srcImage);
            if (image_node && (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(srcImageLayout));
        }
    }
    return skip_call;
}

static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, destImage, sub, node)) {
            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout is %s "
                                                                        "when its current layout is %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_node = getImageNode(dev_data, destImage);
            if (image_node && (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(destImageLayout));
        }
    }
    return skip_call;
}

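// NOTE: In both Verify*ImageLayout() helpers above, the first time a subresource is seen in a
// command buffer its expected layout is simply recorded (FindLayout fails, SetLayout seeds it);
// only subsequent uses are checked against that record. For example (hypothetical sequence):
//
//     vkCmdCopyImage(cb, img, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, ...); // seeds img's layout record
//     vkCmdCopyImage(cb, img, VK_IMAGE_LAYOUT_GENERAL, ...);              // error: GENERAL != recorded layout
//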
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
                                   VkExtent3D *granularity) {
    bool valid = true;
    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pPool) {
        // Copy the queue family's granularity into the caller's struct; assigning the local pointer,
        // as this code previously did, would leave the caller's copy zeroed in its error messages.
        *granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
        if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
            (vk_safe_modulo(extent->height, granularity->height) != 0)) {
            valid = false;
        }
    }
    return valid;
}

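// NOTE: A quick worked example of the alignment test above: with a queue family granularity of
// (w=4, h=4, d=1), an extent of (w=64, h=60, d=1) is aligned (64 % 4 == 0, 60 % 4 == 0, 1 % 1 == 0),
// while (w=30, h=60, d=1) is not (30 % 4 != 0). vk_safe_modulo is assumed here to behave like '%'
// while guarding against a zero divisor, so a (0, 0, 0) granularity (no restriction) passes.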
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset, const uint32_t i,
                                  const char *function, const char *member) {
    bool skip = false;
    VkExtent3D granularity = {};
    VkExtent3D extent = {};
    extent.width = static_cast<uint32_t>(abs(offset->x));
    extent.height = static_cast<uint32_t>(abs(offset->y));
    extent.depth = static_cast<uint32_t>(abs(offset->z));
    if (IsExtentAligned(dev_data, cb_node, &extent, &granularity) == false) {
        skip |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS", "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must respect this command buffer's "
                                                        "queue family image transfer granularity (w=%d, h=%d, d=%d).",
            function, i, member, offset->x, offset->y, offset->z, granularity.width, granularity.height, granularity.depth);
    }
    return skip;
}

// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent, const uint32_t i,
                                  const char *function, const char *member) {
    bool skip = false;
    VkExtent3D granularity = {};
    if (IsExtentAligned(dev_data, cb_node, extent, &granularity) == false) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must respect this command buffer's "
                        "queue family image transfer granularity (w=%d, h=%d, d=%d).",
                        function, i, member, extent->width, extent->height, extent->depth, granularity.width, granularity.height,
                        granularity.depth);
    }
    return skip;
}

// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value, const uint32_t i,
                               const char *function, const char *member) {
    bool skip = false;
    VkExtent3D granularity = {};
    VkExtent3D extent = {};
    extent.width = value;
    if (IsExtentAligned(dev_data, cb_node, &extent, &granularity) == false) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%d) must respect this command buffer's queue family image transfer granularity (%d).",
                        function, i, member, extent.width, granularity.width);
    }
    return skip;
}

// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value, const uint32_t i,
                                const char *function, const char *member) {
    bool skip = false;
    VkExtent3D *granularity;
    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pPool) {
        granularity = &dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
        if (vk_safe_modulo(value, granularity->width) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (%" PRIdLEAST64
                            ") must respect this command buffer's queue family image transfer granularity (%d).",
                            function, i, member, value, granularity->width);
        }
    }
    return skip;
}

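// NOTE: For buffer-side values the granularity check above is one-dimensional: the value must be a
// multiple of the granularity width. E.g. (hypothetical numbers) with granularity.width == 4, a
// bufferOffset of 256 passes (256 % 4 == 0) while 258 is flagged (258 % 4 == 2).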
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                    const VkImageCopy *region, const uint32_t i,
                                                                    const char *function) {
    bool skip = false;
    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, i, function, "srcOffset");
    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, i, function, "dstOffset");
    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, i, function, "extent");
    return skip;
}

// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                          const VkBufferImageCopy *region, const uint32_t i,
                                                                          const char *function) {
    bool skip = false;
    skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, i, function, "bufferOffset");
    skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, i, function, "bufferRowLength");
    skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, i, function, "bufferImageHeight");
    skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, i, function, "imageOffset");
    skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, i, function, "imageExtent");
    return skip;
}

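// NOTE: Mapping of VkBufferImageCopy members to the granularity checks above, for reference:
// bufferOffset -> CheckItgSize, bufferRowLength/bufferImageHeight -> CheckItgInt,
// imageOffset -> CheckItgOffset, imageExtent -> CheckItgExtent. A typical call site (see
// CmdCopyBufferToImage() below) loops over pRegions and ORs each result into skip_call:
//
//     for (uint32_t i = 0; i < regionCount; ++i)
//         skip |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, &pRegions[i], i, "caller()");
//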
VKAPI_ATTR void VKAPI_CALL
CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyImage()");
        // Update bindings between images and cmd buffer
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, src_img_node, "vkCmdCopyImage()");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, dst_img_node, "vkCmdCopyImage()");
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImage()"); };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, &pRegions[i], i, "vkCmdCopyImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions);
}

VKAPI_ATTR void VKAPI_CALL
CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdBlitImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdBlitImage()");
        // Update bindings between images and cmd buffer
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, src_img_node, "vkCmdBlitImage()");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, dst_img_node, "vkCmdBlitImage()");
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdBlitImage()"); };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions, filter);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                VkImage dstImage, VkImageLayout dstImageLayout,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_buff_node && dst_img_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBufferToImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyBufferToImage()");
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node, "vkCmdCopyBufferToImage()");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, dst_img_node, "vkCmdCopyBufferToImage()");
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBufferToImage()"); };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, &pRegions[i], i,
                                                                                "vkCmdCopyBufferToImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && src_img_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyImageToBuffer()");
        // Update bindings between buffer/image and cmd buffer
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, src_img_node, "vkCmdCopyImageToBuffer()");
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdCopyImageToBuffer()");
        // Validate that SRC image & DST buffer have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, &pRegions[i], i,
                                                                                "vkCmdCopyImageToBuffer()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}

VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdUpdateBuffer()");
        // Update bindings between buffer and cmd buffer
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdUpdateBuffer()");
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VKAPI_ATTR void VKAPI_CALL
CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdFillBuffer()");
        // Update bindings between buffer and cmd buffer
        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdFillBuffer()");
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}

VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // TODO : commandBuffer should be srcObj
            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
            // call CmdClearAttachments
            // Otherwise this seems more like a performance warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW,
                                 "DS", "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
                                       " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                 (uint64_t)(commandBuffer));
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()");
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];

        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                bool found = false;
                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
                        attachment->colorAttachment, pCB->activeSubpass);
                }
            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
                    (pSD->pDepthStencilAttachment->attachment ==
                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass

                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
                        "in active subpass %d",
                        attachment->colorAttachment,
                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
                        pCB->activeSubpass);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

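// NOTE: The performance warning in CmdClearAttachments() above fires for the common anti-pattern
// of clearing the full render area before any draw, e.g. (hypothetical call sequence):
//
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdClearAttachments(cb, 1, &clear_att, 1, &full_render_area_rect); // warned: prefer LOAD_OP_CLEAR
//
// Clearing via VK_ATTACHMENT_LOAD_OP_CLEAR in the render pass is generally cheaper, particularly
// on tiled GPUs.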
VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto img_node = getImageNode(dev_data, image);
    if (cb_node && img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearColorImage()");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, img_node, "vkCmdClearColorImage()");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                          const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto img_node = getImageNode(dev_data, image);
    if (cb_node && img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearDepthStencilImage()");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, img_node, "vkCmdClearDepthStencilImage()");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
                                                                   pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdResolveImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdResolveImage()");
        // Update bindings between images and cmd buffer
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, src_img_node, "vkCmdResolveImage()");
        skip_call |= addCommandBufferBindingImage(dev_data, cb_node, dst_img_node, "vkCmdResolveImage()");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdResolveImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

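// NOTE: setEventStageMask() above is also used as a deferred update: CmdSetEvent()/CmdResetEvent()
// below bind it with std::bind so that the queue is supplied later, at submit time. A sketch of how
// such an update would be invoked (hypothetical submit-side loop):
//
//     for (auto &eventUpdate : pCB->eventUpdates)
//         skip |= eventUpdate(queue); // runs setEventStageMask(queue, commandBuffer, event, stageMask)
//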
VKAPI_ATTR void VKAPI_CALL
CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
        auto event_node = getEventNode(dev_data, event);
        if (event_node) {
            addCommandBufferBinding(&event_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_node->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
        auto event_node = getEventNode(dev_data, event);
        if (event_node) {
            addCommandBufferBinding(&event_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_node->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}

static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                                   const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    bool skip = false;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where
        // possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub,
                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
                                                                                    "when the current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}

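// NOTE: In TransitionImageLayouts() above, a barrier whose oldLayout is VK_IMAGE_LAYOUT_UNDEFINED is
// always accepted (the image contents may be discarded), while any other oldLayout must match the
// layout tracked per subresource. For example, transitioning a subresource recorded as
// TRANSFER_DST_OPTIMAL with a barrier declaring oldLayout == SHADER_READ_ONLY_OPTIMAL is flagged.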
// Print readable FlagBits in FlagMask
static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1 << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}

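// NOTE: Example output of string_VkAccessFlags() above: an accessMask of
// (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT) yields
// "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT]" (bits are emitted from low to high),
// and an accessMask of 0 yields "[None]".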
8161// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8162// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8163// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8164static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8165                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8166                             const char *type) {
8167    bool skip_call = false;
8168
8169    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8170        if (accessMask & ~(required_bit | optional_bits)) {
8171            // TODO: Verify against Valid Use
8172            skip_call |=
8173                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8174                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8175                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8176        }
8177    } else {
8178        if (!required_bit) {
8179            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask 0x%X %s must contain at least one of access bits 0x%X "
8181                                                                  "%s when layout is %s, unless the app has previously added a "
8182                                                                  "barrier for this transition.",
8183                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8184                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8185        } else {
8186            std::string opt_bits;
8187            if (optional_bits != 0) {
8188                std::stringstream ss;
8189                ss << optional_bits;
8190                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8191            }
8192            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask 0x%X %s must have required access bit 0x%X %s %s when "
8194                                                                  "layout is %s, unless the app has previously added a barrier for "
8195                                                                  "this transition.",
8196                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8197                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8198        }
8199    }
8200    return skip_call;
8201}
8202
8203static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8204                                        const VkImageLayout &layout, const char *type) {
8205    bool skip_call = false;
8206    switch (layout) {
8207    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8208        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8209                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8210        break;
8211    }
8212    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8213        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8214                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8215        break;
8216    }
8217    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8218        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8219        break;
8220    }
8221    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8222        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8223        break;
8224    }
8225    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8226        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8227                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8228        break;
8229    }
8230    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8231        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8232                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8233        break;
8234    }
8235    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8236        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8237        break;
8238    }
8239    case VK_IMAGE_LAYOUT_UNDEFINED: {
8240        if (accessMask != 0) {
8241            // TODO: Verify against Valid Use section spec
8242            skip_call |=
8243                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8244                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8245                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8246        }
8247        break;
8248    }
8249    case VK_IMAGE_LAYOUT_GENERAL:
8250    default: { break; }
8251    }
8252    return skip_call;
8253}
8254
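// Validate the barrier arrays recorded via vkCmdPipelineBarrier()/vkCmdWaitEvents():
//  - memory barriers inside a render pass require the active subpass to declare a self-dependency
//  - image barrier queue family indices must be consistent with the image's sharing mode
//  - image barrier access masks must be compatible with the old/new layouts (see ValidateMaskBitsFromLayouts)
//  - aspect masks and subresource ranges must match the image's format, layer count, and mip count
//  - buffer barriers are disallowed inside a render pass, and their offset/size must fit within the buffer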
8255static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8256                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8257                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8258                             const VkImageMemoryBarrier *pImageMemBarriers) {
8259    bool skip_call = false;
8260    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8261    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8262    if (pCB->activeRenderPass && memBarrierCount) {
8263        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
8264            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self-dependency specified.",
8267                                 funcName, pCB->activeSubpass);
8268        }
8269    }
8270    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8271        auto mem_barrier = &pImageMemBarriers[i];
8272        auto image_data = getImageNode(dev_data, mem_barrier->image);
8273        if (image_data) {
8274            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8275            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8276            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8277                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8278                // be VK_QUEUE_FAMILY_IGNORED
8279                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8280                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8281                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8282                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8285                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8286                }
8287            } else {
8288                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8289                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8290                // or both be a valid queue family
8291                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8292                    (src_q_f_index != dst_q_f_index)) {
8293                    skip_call |=
8294                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8295                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8296                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8297                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8298                                                                     "must be.",
8299                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8300                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8301                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8302                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8303                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8304                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8305                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8306                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than or equal to the number of "
                                         "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
8309                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8310                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
8311                }
8312            }
8313        }
8314
8315        if (mem_barrier) {
8316            skip_call |=
8317                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8318            skip_call |=
8319                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8320            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
                                     funcName);
8325            }
8326            auto image_data = getImageNode(dev_data, mem_barrier->image);
8327            VkFormat format = VK_FORMAT_UNDEFINED;
8328            uint32_t arrayLayers = 0, mipLevels = 0;
8329            bool imageFound = false;
8330            if (image_data) {
8331                format = image_data->createInfo.format;
8332                arrayLayers = image_data->createInfo.arrayLayers;
8333                mipLevels = image_data->createInfo.mipLevels;
8334                imageFound = true;
8335            } else if (dev_data->device_extensions.wsi_enabled) {
8336                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
8337                if (imageswap_data) {
8338                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
8339                    if (swapchain_data) {
8340                        format = swapchain_data->createInfo.imageFormat;
8341                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
8342                        mipLevels = 1;
8343                        imageFound = true;
8344                    }
8345                }
8346            }
8347            if (imageFound) {
8348                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
8349                if (vk_format_is_depth_or_stencil(format)) {
8350                    if (vk_format_is_depth_and_stencil(format)) {
8351                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) && !(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                                 "%s: Image is a depth and stencil format and thus must "
                                                 "have either one or both of VK_IMAGE_ASPECT_DEPTH_BIT and "
                                                 "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                                 funcName);
8358                        }
8359                    } else if (vk_format_is_depth_only(format)) {
8360                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT)) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                                 "%s: Image is a depth-only format and thus must "
                                                 "have VK_IMAGE_ASPECT_DEPTH_BIT set.",
                                                 funcName);
8365                        }
8366                    } else { // stencil-only case
8367                        if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                                 "%s: Image is a stencil-only format and thus must "
                                                 "have VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                                 funcName);
8372                        }
8373                    }
8374                } else { // image is a color format
8375                    if (!(aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                             0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                             "%s: Image is a color format and thus must "
                                             "have VK_IMAGE_ASPECT_COLOR_BIT set.",
                                             funcName);
8380                    }
8381                }
8382                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8383                                     ? 1
8384                                     : mem_barrier->subresourceRange.layerCount;
8385                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource range: the sum of baseArrayLayer (%d) and layerCount (%d) must be "
                                         "less than or equal to the total number of layers (%d).",
                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
8392                }
8393                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8394                                     ? 1
8395                                     : mem_barrier->subresourceRange.levelCount;
8396                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource range: the sum of baseMipLevel (%d) and levelCount (%d) must be "
                                         "less than or equal to the total number of levels (%d).",
                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
                                         mem_barrier->subresourceRange.levelCount, mipLevels);
8403                }
8404            }
8405        }
8406    }
8407    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8408        auto mem_barrier = &pBufferMemBarriers[i];
8409        if (pCB->activeRenderPass) {
8410            skip_call |=
8411                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8412                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8413        }
8414        if (!mem_barrier)
8415            continue;
8416
8417        // Validate buffer barrier queue family indices
8418        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8419             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8420            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8421             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
8422            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8423                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater than or "
                                 "equal to the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8426                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8427                                 dev_data->phys_dev_properties.queue_family_properties.size());
8428        }
8429
8430        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
8431        if (buffer_node) {
8432            auto buffer_size = buffer_node->memSize;
8433            if (mem_barrier->offset >= buffer_size) {
8434                skip_call |= log_msg(
8435                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8436                    DRAWSTATE_INVALID_BARRIER, "DS",
8437                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
8438                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8439                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
8440            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8441                skip_call |= log_msg(
8442                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8443                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
8444                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
8445                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8446                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
8447                    reinterpret_cast<const uint64_t &>(buffer_size));
8448            }
8449        }
8450    }
8451    return skip_call;
8452}
8453
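// Deferred check, run at queue submit time: the srcStageMask recorded by vkCmdWaitEvents() must equal
// the OR of the stageMasks the waited events were set with (optionally plus VK_PIPELINE_STAGE_HOST_BIT
// to cover events set from the host with vkSetEvent()).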
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
8455    bool skip_call = false;
8456    VkPipelineStageFlags stageMask = 0;
8457    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8458    for (uint32_t i = 0; i < eventCount; ++i) {
8459        auto event = pCB->events[firstEventIndex + i];
8460        auto queue_data = dev_data->queueMap.find(queue);
8461        if (queue_data == dev_data->queueMap.end())
8462            return false;
8463        auto event_data = queue_data->second.eventToStageMap.find(event);
8464        if (event_data != queue_data->second.eventToStageMap.end()) {
8465            stageMask |= event_data->second;
8466        } else {
8467            auto global_event_data = getEventNode(dev_data, event);
8468            if (!global_event_data) {
8469                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8470                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8471                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8472                                     reinterpret_cast<const uint64_t &>(event));
8473            } else {
8474                stageMask |= global_event_data->stageMask;
8475            }
8476        }
8477    }
8478    // TODO: Need to validate that host_bit is only set if set event is called
8479    // but set event can be called at any time.
8480    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
8481        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
                                                            "using srcStageMask 0x%X which must be the bitwise "
                                                            "OR of the stageMask parameters used in calls to "
                                                            "vkCmdSetEvent (plus VK_PIPELINE_STAGE_HOST_BIT if "
                                                            "the event is set with vkSetEvent) but instead is 0x%X.",
8487                             sourceStageMask, stageMask);
8488    }
8489    return skip_call;
8490}
8491
8492VKAPI_ATTR void VKAPI_CALL
8493CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8494              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8495              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8496              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8497    bool skip_call = false;
8498    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8499    std::unique_lock<std::mutex> lock(global_lock);
8500    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8501    if (pCB) {
8502        auto firstEventIndex = pCB->events.size();
8503        for (uint32_t i = 0; i < eventCount; ++i) {
8504            auto event_node = getEventNode(dev_data, pEvents[i]);
8505            if (event_node) {
8506                addCommandBufferBinding(&event_node->cb_bindings,
8507                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
8508                                        pCB);
8509                event_node->cb_bindings.insert(pCB);
8510            }
8511            pCB->waitedEvents.insert(pEvents[i]);
8512            pCB->events.push_back(pEvents[i]);
8513        }
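        // The stage masks of the waited events are not final until submit time, so capture the
        // state needed here and defer validateEventStageMask() until the command buffer is submitted.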
8514        std::function<bool(VkQueue)> eventUpdate =
8515            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
8516        pCB->eventUpdates.push_back(eventUpdate);
8517        if (pCB->state == CB_RECORDING) {
8518            skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8519        } else {
8520            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8521        }
8522        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8523        skip_call |=
8524            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8525                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8526    }
8527    lock.unlock();
8528    if (!skip_call)
8529        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8530                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8531                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8532}
8533
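// A typical application-side call that this intercept validates (an illustrative sketch, not layer code;
// 'cb' and 'image' are hypothetical app handles):
//   VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//   barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//   barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//   barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;       // must suit oldLayout
//   barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;          // must suit newLayout
//   barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   barrier.image = image;
//   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//   vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0,
//                        0, nullptr, 0, nullptr, 1, &barrier);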
8534VKAPI_ATTR void VKAPI_CALL
8535CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8536                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8537                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8538                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8539    bool skip_call = false;
8540    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8541    std::unique_lock<std::mutex> lock(global_lock);
8542    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8543    if (pCB) {
8544        skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8545        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8546        skip_call |=
8547            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8548                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8549    }
8550    lock.unlock();
8551    if (!skip_call)
8552        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8553                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8554                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8555}
8556
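// Deferred update, run at queue submit time: record query availability in both the command
// buffer's and the owning queue's queryToStateMap. Always returns false (never skips the call).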
8557bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8558    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8559    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8560    if (pCB) {
8561        pCB->queryToStateMap[object] = value;
8562    }
8563    auto queue_data = dev_data->queueMap.find(queue);
8564    if (queue_data != dev_data->queueMap.end()) {
8565        queue_data->second.queryToStateMap[object] = value;
8566    }
8567    return false;
8568}
8569
8570VKAPI_ATTR void VKAPI_CALL
8571CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8572    bool skip_call = false;
8573    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8574    std::unique_lock<std::mutex> lock(global_lock);
8575    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8576    if (pCB) {
8577        QueryObject query = {queryPool, slot};
8578        pCB->activeQueries.insert(query);
8579        if (!pCB->startedQueries.count(query)) {
8580            pCB->startedQueries.insert(query);
8581        }
8582        skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8583        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8584                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
8585    }
8586    lock.unlock();
8587    if (!skip_call)
8588        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8589}
8590
8591VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8592    bool skip_call = false;
8593    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8594    std::unique_lock<std::mutex> lock(global_lock);
8595    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8596    if (pCB) {
8597        QueryObject query = {queryPool, slot};
8598        if (!pCB->activeQueries.count(query)) {
8599            skip_call |=
8600                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8601                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
8602                        (uint64_t)(queryPool), slot);
8603        } else {
8604            pCB->activeQueries.erase(query);
8605        }
8606        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8607        pCB->queryUpdates.push_back(queryUpdate);
8608        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8610        } else {
8611            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8612        }
8613        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8614                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
8615    }
8616    lock.unlock();
8617    if (!skip_call)
8618        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8619}
8620
8621VKAPI_ATTR void VKAPI_CALL
8622CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8623    bool skip_call = false;
8624    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8625    std::unique_lock<std::mutex> lock(global_lock);
8626    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8627    if (pCB) {
8628        for (uint32_t i = 0; i < queryCount; i++) {
8629            QueryObject query = {queryPool, firstQuery + i};
8630            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8631            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
8632            pCB->queryUpdates.push_back(queryUpdate);
8633        }
8634        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8636        } else {
8637            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8638        }
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()");
8640        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8641                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
8642    }
8643    lock.unlock();
8644    if (!skip_call)
8645        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8646}
8647
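// Deferred check, run at queue submit time: every query that vkCmdCopyQueryPoolResults() reads
// must be available, first consulting the queue's state map and then the device-wide map.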
8648bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
8649    bool skip_call = false;
8650    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8651    auto queue_data = dev_data->queueMap.find(queue);
8652    if (queue_data == dev_data->queueMap.end())
8653        return false;
8654    for (uint32_t i = 0; i < queryCount; i++) {
8655        QueryObject query = {queryPool, firstQuery + i};
8656        auto query_data = queue_data->second.queryToStateMap.find(query);
8657        bool fail = false;
8658        if (query_data != queue_data->second.queryToStateMap.end()) {
8659            if (!query_data->second) {
8660                fail = true;
8661            }
8662        } else {
8663            auto global_query_data = dev_data->queryToStateMap.find(query);
8664            if (global_query_data != dev_data->queryToStateMap.end()) {
8665                if (!global_query_data->second) {
8666                    fail = true;
8667                }
8668            } else {
8669                fail = true;
8670            }
8671        }
8672        if (fail) {
8673            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8674                                 DRAWSTATE_INVALID_QUERY, "DS",
8675                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8676                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
8677        }
8678    }
8679    return skip_call;
8680}
8681
8682VKAPI_ATTR void VKAPI_CALL
8683CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8684                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8685    bool skip_call = false;
8686    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8687    std::unique_lock<std::mutex> lock(global_lock);
8688
8689    auto cb_node = getCBNode(dev_data, commandBuffer);
8690    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8691    if (cb_node && dst_buff_node) {
8692        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyQueryPoolResults()");
8693        // Update bindings between buffer and cmd buffer
8694        skip_call |= addCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node, "vkCmdCopyQueryPoolResults()");
8695        // Validate that DST buffer has correct usage flags set
8696        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8697                                              "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8698        std::function<bool()> function = [=]() {
8699            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8700            return false;
8701        };
8702        cb_node->validate_functions.push_back(function);
8703        std::function<bool(VkQueue)> queryUpdate =
8704            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
8705        cb_node->queryUpdates.push_back(queryUpdate);
8706        if (cb_node->state == CB_RECORDING) {
8707            skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8708        } else {
8709            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8710        }
8711        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()");
8712        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8713                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
8714    } else {
8715        assert(0);
8716    }
8717    lock.unlock();
8718    if (!skip_call)
8719        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8720                                                                 dstOffset, stride, flags);
8721}
8722
8723VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8724                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8725                                            const void *pValues) {
8726    bool skip_call = false;
8727    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8728    std::unique_lock<std::mutex> lock(global_lock);
8729    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8730    if (pCB) {
8731        if (pCB->state == CB_RECORDING) {
8732            skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8733        } else {
8734            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8735        }
8736    }
8737    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8738    if (0 == stageFlags) {
8739        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8740                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
8741    }
8742
8743    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
8744    auto pipeline_layout = getPipelineLayout(dev_data, layout);
8745    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
8746    // contained in the pipeline ranges.
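    // For example, matching layout ranges [0, 16) and [8, 32) coalesce into [0, 32), so an update with
    // offset = 4 and size = 24 (i.e. [4, 28)) is accepted even though it straddles the original boundary.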
8747    // Build a {start, end} span list for ranges with matching stage flags.
8748    const auto &ranges = pipeline_layout->push_constant_ranges;
8749    struct span {
8750        uint32_t start;
8751        uint32_t end;
8752    };
8753    std::vector<span> spans;
8754    spans.reserve(ranges.size());
8755    for (const auto &iter : ranges) {
8756        if (iter.stageFlags == stageFlags) {
8757            spans.push_back({iter.offset, iter.offset + iter.size});
8758        }
8759    }
    if (spans.empty()) {
8761        // There were no ranges that matched the stageFlags.
8762        skip_call |=
8763            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8764                    DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
8765                                                          "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
8766                    (uint32_t)stageFlags, (uint64_t)layout);
8767    } else {
8768        // Sort span list by start value.
8769        struct comparer {
8770            bool operator()(struct span i, struct span j) { return i.start < j.start; }
8771        } my_comparer;
8772        std::sort(spans.begin(), spans.end(), my_comparer);
8773
8774        // Examine two spans at a time.
8775        std::vector<span>::iterator current = spans.begin();
8776        std::vector<span>::iterator next = current + 1;
8777        while (next != spans.end()) {
8778            if (current->end < next->start) {
8779                // There is a gap; cannot coalesce. Move to the next two spans.
8780                ++current;
8781                ++next;
8782            } else {
8783                // Coalesce the two spans.  The start of the next span
8784                // is within the current span, so pick the larger of
8785                // the end values to extend the current span.
8786                // Then delete the next span and set next to the span after it.
8787                current->end = max(current->end, next->end);
8788                next = spans.erase(next);
8789            }
8790        }
8791
8792        // Now we can check if the incoming range is within any of the spans.
8793        bool contained_in_a_range = false;
8794        for (uint32_t i = 0; i < spans.size(); ++i) {
8795            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
8796                contained_in_a_range = true;
8797                break;
8798            }
8799        }
8800        if (!contained_in_a_range) {
8801            skip_call |=
8802                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8803                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
8804                                                              "with stageFlags = 0x%" PRIx32 " "
8805                                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
8806                        offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
8807        }
8808    }
8809    lock.unlock();
8810    if (!skip_call)
8811        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8812}
8813
8814VKAPI_ATTR void VKAPI_CALL
8815CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8816    bool skip_call = false;
8817    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8818    std::unique_lock<std::mutex> lock(global_lock);
8819    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8820    if (pCB) {
8821        QueryObject query = {queryPool, slot};
8822        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8823        pCB->queryUpdates.push_back(queryUpdate);
8824        if (pCB->state == CB_RECORDING) {
8825            skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8826        } else {
8827            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8828        }
8829    }
8830    lock.unlock();
8831    if (!skip_call)
8832        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8833}
8834
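// Helper for framebuffer creation: for every attachment reference actually used by a subpass,
// verify the underlying image was created with the required usage bit (e.g. a color attachment
// needs VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT).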
8835static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
8836                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
8837    bool skip_call = false;
8838
8839    for (uint32_t attach = 0; attach < count; attach++) {
8840        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
8841            // Attachment counts are verified elsewhere, but prevent an invalid access
8842            if (attachments[attach].attachment < fbci->attachmentCount) {
8843                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
8844                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, *image_view);
8845                if (ivci != nullptr) {
8846                    const VkImageCreateInfo *ici = &getImageNode(dev_data, ivci->image)->createInfo;
                    auto image_node = getImageNode(dev_data, ivci->image);
                    // Null-check the image node itself; the address of its member could never be null
                    if (image_node != nullptr) {
                        const VkImageCreateInfo *ici = &image_node->createInfo;
8849                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8850                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
                                                 "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
8852                                                 "IMAGE_USAGE flags (%s).",
8853                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
8854                        }
8855                    }
8856                }
8857            }
8858        }
8859    }
8860    return skip_call;
8861}
8862
8863// Validate VkFramebufferCreateInfo which includes:
8864// 1. attachmentCount equals renderPass attachmentCount
8865// 2. corresponding framebuffer and renderpass attachments have matching formats
8866// 3. corresponding framebuffer and renderpass attachments have matching sample counts
8867// 4. fb attachments only have a single mip level
8868// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
8870// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8871// 8. fb dimensions are within physical device limits
8872static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8873    bool skip_call = false;
8874
8875    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
8876    if (rp_node) {
8877        const VkRenderPassCreateInfo *rpci = rp_node->pCreateInfo;
8878        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
8879            skip_call |= log_msg(
8880                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8881                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
8882                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
8883                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
8884                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8885        } else {
8886            // attachmentCounts match, so make sure corresponding attachment details line up
8887            const VkImageView *image_views = pCreateInfo->pAttachments;
8888            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, image_views[i]);
                if (ivci == nullptr) {
                    // Defensive: skip views with no tracked state rather than dereferencing null below
                    continue;
                }
                if (ivci->format != rpci->pAttachments[i].format) {
8891                    skip_call |= log_msg(
8892                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8893                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
8894                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
8895                              "the format of "
8896                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
8897                        i, string_VkFormat(ivci->format), string_VkFormat(rpci->pAttachments[i].format),
8898                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8899                }
                auto image_node = getImageNode(dev_data, ivci->image);
                if (image_node == nullptr) {
                    // Defensive: skip images with no tracked state rather than dereferencing null below
                    continue;
                }
                const VkImageCreateInfo *ici = &image_node->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
8902                    skip_call |= log_msg(
8903                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8904                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
8905                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
8906                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
8907                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
8908                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8909                }
8910                // Verify that view only has a single mip level
8911                if (ivci->subresourceRange.levelCount != 1) {
8912                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
8913                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8914                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
8916                                         i, ivci->subresourceRange.levelCount);
8917                }
8918                const uint32_t mip_level = ivci->subresourceRange.baseMipLevel;
8919                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
8920                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
8921                if ((ivci->subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
8922                    (mip_height < pCreateInfo->height)) {
8923                    skip_call |=
8924                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8925                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8926                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
8927                                "than the corresponding "
8928                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
8929                                "dimensions for "
8930                                "attachment #%u, framebuffer:\n"
8931                                "width: %u, %u\n"
8932                                "height: %u, %u\n"
8933                                "layerCount: %u, %u\n",
8934                                i, ivci->subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
8935                                pCreateInfo->height, ivci->subresourceRange.layerCount, pCreateInfo->layers);
8936                }
8937                if (((ivci->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.r != VK_COMPONENT_SWIZZLE_R)) ||
8938                    ((ivci->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.g != VK_COMPONENT_SWIZZLE_G)) ||
8939                    ((ivci->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.b != VK_COMPONENT_SWIZZLE_B)) ||
8940                    ((ivci->components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.a != VK_COMPONENT_SWIZZLE_A))) {
8941                    skip_call |= log_msg(
8942                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8943                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
8945                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
8946                        "r swizzle = %s\n"
8947                        "g swizzle = %s\n"
8948                        "b swizzle = %s\n"
8949                        "a swizzle = %s\n",
8950                        i, string_VkComponentSwizzle(ivci->components.r), string_VkComponentSwizzle(ivci->components.g),
8951                        string_VkComponentSwizzle(ivci->components.b), string_VkComponentSwizzle(ivci->components.a));
8952                }
8953            }
8954        }
8955        // Verify correct attachment usage flags
8956        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
8957            // Verify input attachments:
8958            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
8959                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
8960            // Verify color attachments:
8961            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
8962                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
8963            // Verify depth/stencil attachments:
8964            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
8965                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
8966                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
8967            }
8968        }
8969    } else {
8970        skip_call |=
8971            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8972                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8973                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
8974                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8975    }
8976    // Verify FB dimensions are within physical device limits
8977    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
8978        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
8979        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
8980        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8981                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8982                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
8983                             "Here are the respective dimensions: requested, device max:\n"
8984                             "width: %u, %u\n"
8985                             "height: %u, %u\n"
8986                             "layerCount: %u, %u\n",
8987                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
8988                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
8989                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
8990    }
8991    return skip_call;
8992}
8993
8994// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
8995//  Return true if an error is encountered and callback returns true to skip call down chain
8996//   false indicates that call down chain should proceed
8997static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8998    // TODO : Verify that renderPass FB is created with is compatible with FB
8999    bool skip_call = false;
9000    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9001    return skip_call;
9002}
9003
9004// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
9005static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9006    // Shadow create info and store in map
9007    std::unique_ptr<FRAMEBUFFER_NODE> fb_node(
9008        new FRAMEBUFFER_NODE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->pCreateInfo));
9009
9010    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9011        VkImageView view = pCreateInfo->pAttachments[i];
9012        auto view_data = getImageViewData(dev_data, view);
9013        if (!view_data) {
9014            continue;
9015        }
        auto image_node = getImageNode(dev_data, view_data->image);
        if (!image_node) {
            // Defensive: skip attachments whose image has no tracked state
            continue;
        }
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.mem = image_node->mem;
9018        fb_info.image = view_data->image;
9019        fb_node->attachments.push_back(fb_info);
9020    }
9021    dev_data->frameBufferMap[fb] = std::move(fb_node);
9022}
9023
9024VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9025                                                 const VkAllocationCallbacks *pAllocator,
9026                                                 VkFramebuffer *pFramebuffer) {
9027    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9028    std::unique_lock<std::mutex> lock(global_lock);
9029    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9030    lock.unlock();
9031
9032    if (skip_call)
9033        return VK_ERROR_VALIDATION_FAILED_EXT;
9034
9035    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9036
9037    if (VK_SUCCESS == result) {
9038        lock.lock();
9039        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9040        lock.unlock();
9041    }
9042    return result;
9043}
9044
9045static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9046                           std::unordered_set<uint32_t> &processed_nodes) {
9047    // If we have already checked this node, no dependency path was found through it, so return false.
9048    if (processed_nodes.count(index))
9049        return false;
9050    processed_nodes.insert(index);
9051    const DAGNode &node = subpass_to_node[index];
9052    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
9053    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9054        for (auto elem : node.prev) {
9055            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9056                return true;
9057        }
9058    } else {
9059        return true;
9060    }
9061    return false;
9062}
9063
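// Worked example (hypothetical three-subpass graph): with edges 0 -> 1 and 1 -> 2,
// subpass_to_node[2].prev holds {1} and subpass_to_node[1].prev holds {0}, so
//
//     std::unordered_set<uint32_t> visited;
//     FindDependency(2 /*index*/, 0 /*dependent*/, subpass_to_node, visited);   // returns true via node 1
//
// processed_nodes prevents revisiting a node when the graph fans out.
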
9064static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9065                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9066    bool result = true;
9067    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9068    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9069        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
9070            continue;
9071        const DAGNode &node = subpass_to_node[subpass];
9072        // Check for a specified dependency between the two nodes. If one exists we are done.
9073        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9074        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9075        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9076            // If no direct dependency exists, an implicit one (through intermediate subpasses) still might. If not, report an error.
9077            std::unordered_set<uint32_t> processed_nodes;
9078            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9079                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9080                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9081                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9082                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9083                                     dependent_subpasses[k]);
9084                result = false;
9085            }
9086        }
9087    }
9088    return result;
9089}
9090
9091static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9092                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9093    const DAGNode &node = subpass_to_node[index];
9094    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9095    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9096    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9097        if (attachment == subpass.pColorAttachments[j].attachment)
9098            return true;
9099    }
9100    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9101        if (attachment == subpass.pDepthStencilAttachment->attachment)
9102            return true;
9103    }
9104    bool result = false;
9105    // Loop through previous nodes and see if any of them write to the attachment.
9106    for (auto elem : node.prev) {
9107        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9108    }
9109    // If the attachment was written to by a previous node, then this node needs to preserve it.
9110    if (result && depth > 0) {
9111        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9112        bool has_preserved = false;
9113        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9114            if (subpass.pPreserveAttachments[j] == attachment) {
9115                has_preserved = true;
9116                break;
9117            }
9118        }
9119        if (!has_preserved) {
9120            skip_call |=
9121                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9122                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9123                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9124        }
9125    }
9126    return result;
9127}
9128
9129template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9130    // Half-open ranges [offset, offset + size) overlap iff each one starts before the other ends
9131    return ((offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1)));
9132}
9133
9134bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9135    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9136            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9137}
9138
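// Worked examples for the overlap helpers (values illustrative); ranges are treated as half-open:
//
//     isRangeOverlapping(0u, 3u, 2u, 3u);   // true  -- [0,3) and [2,5) share element 2
//     isRangeOverlapping(0u, 2u, 2u, 2u);   // false -- [0,2) and [2,4) only touch at 2
//     isRangeOverlapping(1u, 1u, 0u, 4u);   // true  -- [1,2) is fully contained in [0,4)
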
9139static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
9140                                 RENDER_PASS_NODE const * renderPass) {
9141    bool skip_call = false;
9142    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
9143    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
9144    auto const & subpass_to_node = renderPass->subpassToNode;
9145    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9146    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9147    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9148    // Find overlapping attachments
9149    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9150        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9151            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9152            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9153            if (viewi == viewj) {
9154                overlapping_attachments[i].push_back(j);
9155                overlapping_attachments[j].push_back(i);
9156                continue;
9157            }
9158            auto view_data_i = getImageViewData(my_data, viewi);
9159            auto view_data_j = getImageViewData(my_data, viewj);
9160            if (!view_data_i || !view_data_j) {
9161                continue;
9162            }
9163            if (view_data_i->image == view_data_j->image &&
9164                isRegionOverlapping(view_data_i->subresourceRange, view_data_j->subresourceRange)) {
9165                overlapping_attachments[i].push_back(j);
9166                overlapping_attachments[j].push_back(i);
9167                continue;
9168            }
9169            auto image_data_i = getImageNode(my_data, view_data_i->image);
9170            auto image_data_j = getImageNode(my_data, view_data_j->image);
9171            if (!image_data_i || !image_data_j) {
9172                continue;
9173            }
9174            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
9175                                                                             image_data_j->memOffset, image_data_j->memSize)) {
9176                overlapping_attachments[i].push_back(j);
9177                overlapping_attachments[j].push_back(i);
9178            }
9179        }
9180    }
9181    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9182        uint32_t attachment = i;
9183        for (auto other_attachment : overlapping_attachments[i]) {
9184            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9185                skip_call |=
9186                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9187                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9188                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9189                            attachment, other_attachment);
9190            }
9191            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9192                skip_call |=
9193                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9194                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9195                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9196                            other_attachment, attachment);
9197            }
9198        }
9199    }
9200    // For each attachment, find the subpasses that use it.
9201    unordered_set<uint32_t> attachmentIndices;
9202    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9203        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9204        attachmentIndices.clear();
9205        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9206            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9207            if (attachment == VK_ATTACHMENT_UNUSED)
9208                continue;
9209            input_attachment_to_subpass[attachment].push_back(i);
9210            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9211                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9212            }
9213        }
9214        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9215            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9216            if (attachment == VK_ATTACHMENT_UNUSED)
9217                continue;
9218            output_attachment_to_subpass[attachment].push_back(i);
9219            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9220                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9221            }
9222            attachmentIndices.insert(attachment);
9223        }
9224        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9225            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9226            output_attachment_to_subpass[attachment].push_back(i);
9227            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9228                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9229            }
9230
9231            if (attachmentIndices.count(attachment)) {
9232                skip_call |=
9233                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
9234                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9235                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
9236                            attachment, i);
9237            }
9238        }
9239    }
9240    // If a dependency is needed, make sure one exists
9241    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9242        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9243        // If the attachment is an input, then all subpasses that write to it must have a dependency relationship
9244        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9245            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9246            if (attachment == VK_ATTACHMENT_UNUSED)
9247                continue;
9248            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9249        }
9250        // If the attachment is an output, then all subpasses that use the attachment must have a dependency relationship
9251        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9252            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9253            if (attachment == VK_ATTACHMENT_UNUSED)
9254                continue;
9255            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9256            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9257        }
9258        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9259            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9260            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9261            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9262        }
9263    }
9264    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved in
9265    // every pass between the one that wrote it and this one.
9266    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9267        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9268        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9269            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9270        }
9271    }
9272    return skip_call;
9273}
9274// ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
9275// VkAttachmentDescription structs that are used by the subpasses of a renderpass. The initial check makes sure that
9276// attachments first used in a READ_ONLY layout don't have CLEAR as their loadOp.
9277static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
9278                                                  const uint32_t attachment,
9279                                                  const VkAttachmentDescription &attachment_description) {
9280    bool skip_call = false;
9281    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
9282    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9283        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
9284            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
9285            skip_call |=
9286                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9287                        0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9288                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
9289        }
9290    }
9291    return skip_call;
9292}
9293
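// Example of the rejected pattern (hypothetical attachment 0): loadOp clears the attachment,
// but its first subpass reference uses a read-only layout, so the clear can never be observed as intended:
//
//     VkAttachmentDescription desc = {};
//     desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;              // clear on load...
//     VkAttachmentReference first_use = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
//     // ...first_use.layout is read-only, so the check above logs DRAWSTATE_INVALID_IMAGE_LAYOUT
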
9294static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9295    bool skip = false;
9296
9297    // Track when we're observing the first use of an attachment
9298    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
9299    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9300        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9301        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9302            auto attach_index = subpass.pColorAttachments[j].attachment;
9303            if (attach_index == VK_ATTACHMENT_UNUSED)
9304                continue;
9305
9306            switch (subpass.pColorAttachments[j].layout) {
9307            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
9308                /* This is ideal. */
9309                break;
9310
9311            case VK_IMAGE_LAYOUT_GENERAL:
9312                /* May not be optimal; TODO: reconsider this warning based on
9313                 * other constraints?
9314                 */
9315                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9316                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9317                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9318                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9319                break;
9320
9321            default:
9322                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9323                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9324                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9325                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9326                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
9327            }
9328
9329            if (attach_first_use[attach_index]) {
9330                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout,
9331                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9332            }
9333            attach_first_use[attach_index] = false;
9334        }
9335        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9336            switch (subpass.pDepthStencilAttachment->layout) {
9337            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
9338            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9339                /* These are ideal. */
9340                break;
9341
9342            case VK_IMAGE_LAYOUT_GENERAL:
9343                /* May not be optimal; TODO: reconsider this warning based on
9344                 * other constraints? GENERAL can be better than doing a bunch
9345                 * of transitions.
9346                 */
9347                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9348                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9349                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9350                                "GENERAL layout for depth attachment may not give optimal performance.");
9351                break;
9352
9353            default:
9354                /* No other layouts are acceptable */
9355                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9356                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9357                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9358                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
9359                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
9360                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9361            }
9362
9363            auto attach_index = subpass.pDepthStencilAttachment->attachment;
9364            if (attach_first_use[attach_index]) {
9365                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
9366                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9367            }
9368            attach_first_use[attach_index] = false;
9369        }
9370        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9371            auto attach_index = subpass.pInputAttachments[j].attachment;
9372            if (attach_index == VK_ATTACHMENT_UNUSED)
9373                continue;
9374
9375            switch (subpass.pInputAttachments[j].layout) {
9376            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9377            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
9378                /* These are ideal. */
9379                break;
9380
9381            case VK_IMAGE_LAYOUT_GENERAL:
9382                /* May not be optimal. TODO: reconsider this warning based on
9383                 * other constraints.
9384                 */
9385                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9386                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9387                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9388                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9389                break;
9390
9391            default:
9392                /* No other layouts are acceptable */
9393                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9394                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9395                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9396                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
9397            }
9398
9399            if (attach_first_use[attach_index]) {
9400                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout,
9401                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9402            }
9403            attach_first_use[attach_index] = false;
9404        }
9405    }
9406    return skip;
9407}
9408
9409static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9410                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9411    bool skip_call = false;
9412    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9413        DAGNode &subpass_node = subpass_to_node[i];
9414        subpass_node.pass = i;
9415    }
9416    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9417        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9418        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9419            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9420            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9421                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9422                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9423        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9424            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9425                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9426        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9427            has_self_dependency[dependency.srcSubpass] = true;
9428        }
9429        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9430            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9431        }
9432        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9433            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9434        }
9435    }
9436    return skip_call;
9437}
9438
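// Example of how a dependency array maps onto the DAG (hypothetical three-subpass render pass):
// dependencies {EXTERNAL -> 0, 0 -> 1, 1 -> 2} produce
//
//     subpass_to_node[0].prev == {VK_SUBPASS_EXTERNAL}    subpass_to_node[0].next == {1}
//     subpass_to_node[1].prev == {0}                      subpass_to_node[1].next == {2}
//     subpass_to_node[2].prev == {1}
//
// while a dependency {1 -> 1} would only set has_self_dependency[1].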
9439
9440VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9441                                                  const VkAllocationCallbacks *pAllocator,
9442                                                  VkShaderModule *pShaderModule) {
9443    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9444    bool skip_call = false;
9445
9446    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
9447    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
9448    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
9449    spv_diagnostic diag = nullptr;
9450
9451    auto result = spvValidate(ctx, &binary, &diag);
9452    if (result != SPV_SUCCESS) {
9453        skip_call |= log_msg(my_data->report_data,
9454                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
9455                             VkDebugReportObjectTypeEXT(0), 0,
9456                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
9457                             diag && diag->error ? diag->error : "(no error text)");
9458    }
9459
9460    spvDiagnosticDestroy(diag);
9461    spvContextDestroy(ctx);
9462
9463    if (skip_call)
9464        return VK_ERROR_VALIDATION_FAILED_EXT;
9465
9466    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9467
9468    if (res == VK_SUCCESS) {
9469        std::lock_guard<std::mutex> lock(global_lock);
9470        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9471    }
9472    return res;
9473}
9474
9475static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
9476    bool skip_call = false;
9477    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
9478        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9479                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9480                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
9481                             type, attachment, attachment_count);
9482    }
9483    return skip_call;
9484}
9485
9486static bool IsPowerOfTwo(unsigned x) {
9487    return x && !(x & (x-1));
9488}
9489
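// VkSampleCountFlagBits values are single bits (1, 2, 4, ...), and the caller below ORs together the
// sample counts of every color and depth/stencil attachment in a subpass. If they all agree, the
// accumulated mask has exactly one bit set:
//
//     IsPowerOfTwo(VK_SAMPLE_COUNT_4_BIT);                          // true  (0b100)
//     IsPowerOfTwo(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT);  // false (0b101) -> mixed counts
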
9490static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
9491    bool skip_call = false;
9492    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9493        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9494        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9495            skip_call |=
9496                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9497                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9498                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
9499        }
9500        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9501            uint32_t attachment = subpass.pPreserveAttachments[j];
9502            if (attachment == VK_ATTACHMENT_UNUSED) {
9503                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9504                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9505                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
9506            } else {
9507                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9508            }
9509        }
9510
9511        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
9512            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9513            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
9514
9515        unsigned sample_count = 0;
9516
9517        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9518            uint32_t attachment;
9519            if (subpass.pResolveAttachments) {
9520                attachment = subpass.pResolveAttachments[j].attachment;
9521                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
9522
9523                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
9524                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9525                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9526                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9527                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
9528                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s",
9529                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
9530                }
9531            }
9532            attachment = subpass.pColorAttachments[j].attachment;
9533            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9534
9535            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9536                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9537
9538                if (subpass_performs_resolve &&
9539                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9540                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9541                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9542                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
9543                                         "which has VK_SAMPLE_COUNT_1_BIT",
9544                                         i, attachment);
9545                }
9546            }
9547        }
9548
9549        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9550            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9551            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9552
9553            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9554                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9555            }
9556        }
9557
9558        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9559            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9560            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
9561        }
9562
9563        if (sample_count && !IsPowerOfTwo(sample_count)) {
9564            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9565                                 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9566                                 "CreateRenderPass:  Subpass %u attempts to render to "
9567                                 "attachments with inconsistent sample counts",
9568                                 i);
9569        }
9570    }
9571    return skip_call;
9572}
9573
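// Example of a subpass that trips the resolve checks above (hypothetical attachments): a resolve
// destination must be single-sample, and the color attachment being resolved must be multisampled.
//
//     VkAttachmentReference color_ref   = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};  // attachment 0: 4 samples
//     VkAttachmentReference resolve_ref = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};  // attachment 1: also 4 samples -> error
//     // With attachment 1 declared VK_SAMPLE_COUNT_4_BIT, the first resolve check logs DRAWSTATE_INVALID_RENDERPASS;
//     // it would pass if attachment 1 were VK_SAMPLE_COUNT_1_BIT.
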
9574VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9575                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
9576    bool skip_call = false;
9577    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9578
9579    std::unique_lock<std::mutex> lock(global_lock);
9580
9581    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9582    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
9583    //       ValidateLayouts.
9584    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
9585    lock.unlock();
9586
9587    if (skip_call) {
9588        return VK_ERROR_VALIDATION_FAILED_EXT;
9589    }
9590
9591    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9592
9593    if (VK_SUCCESS == result) {
9594        lock.lock();
9595
9596        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9597        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9598        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9599
9600        // Shadow create info and store in map
9601        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9602        if (pCreateInfo->pAttachments) {
9603            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9604            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9605                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9606        }
9607        if (pCreateInfo->pSubpasses) {
9608            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9609            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9610
9611            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9612                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9613                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9614                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9615                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
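                // All references for this subpass are shadowed in a single block, laid out as
                //  [ input | color | resolve (optional) | depth/stencil (optional) | preserve ],
                //  and each per-category pointer below is re-aimed at its slice of that block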
9616                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9617
9618                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9619                subpass->pInputAttachments = attachments;
9620                attachments += subpass->inputAttachmentCount;
9621
9622                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9623                subpass->pColorAttachments = attachments;
9624                attachments += subpass->colorAttachmentCount;
9625
9626                if (subpass->pResolveAttachments) {
9627                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9628                    subpass->pResolveAttachments = attachments;
9629                    attachments += subpass->colorAttachmentCount;
9630                }
9631
9632                if (subpass->pDepthStencilAttachment) {
9633                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9634                    subpass->pDepthStencilAttachment = attachments;
9635                    attachments += 1;
9636                }
9637
9638                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
9639                subpass->pPreserveAttachments = &attachments->attachment; // pPreserveAttachments is a uint32_t array
9640            }
9641        }
9642        if (pCreateInfo->pDependencies) {
9643            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9644            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9645                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9646        }
9647
9648        auto render_pass = new RENDER_PASS_NODE(localRPCI);
9649        render_pass->renderPass = *pRenderPass;
9650        render_pass->hasSelfDependency = has_self_dependency;
9651        render_pass->subpassToNode = subpass_to_node;
9652#if MTMERGESOURCE
9653        // MTMTODO : Merge with code from above to eliminate duplication
9654        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9655            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9656            MT_PASS_ATTACHMENT_INFO pass_info;
9657            pass_info.load_op = desc.loadOp;
9658            pass_info.store_op = desc.storeOp;
9659            pass_info.stencil_load_op = desc.stencilLoadOp;
9660            pass_info.stencil_store_op = desc.stencilStoreOp;
9661            pass_info.attachment = i;
9662            render_pass->attachments.push_back(pass_info);
9663        }
9664        // TODO: Maybe fill list and then copy instead of locking
9665        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
9666        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
9667        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9668            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9669            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9670                uint32_t attachment = subpass.pColorAttachments[j].attachment;
9671                if (!attachment_first_read.count(attachment)) {
9672                    attachment_first_read.insert(std::make_pair(attachment, false));
9673                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9674                }
9675            }
9676            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9677                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9678                if (!attachment_first_read.count(attachment)) {
9679                    attachment_first_read.insert(std::make_pair(attachment, false));
9680                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9681                }
9682            }
9683            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9684                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9685                if (!attachment_first_read.count(attachment)) {
9686                    attachment_first_read.insert(std::make_pair(attachment, true));
9687                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9688                }
9689            }
9690        }
9691#endif
9692        dev_data->renderPassMap[*pRenderPass] = render_pass;
9693    }
9694    return result;
9695}
9696
9697// Free the renderpass shadow
9698static void deleteRenderPasses(layer_data *my_data) {
9699    for (auto renderPass : my_data->renderPassMap) {
9700        const VkRenderPassCreateInfo *pRenderPassInfo = renderPass.second->pCreateInfo;
9701        delete[] pRenderPassInfo->pAttachments;
9702        if (pRenderPassInfo->pSubpasses) {
9703            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9704                // Attachments are all allocated in one block, so we just need to
9705                //  find the first non-null pointer to delete
9706                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9707                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9708                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9709                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9710                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9711                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9712                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9713                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9714                }
9715            }
9716            delete[] pRenderPassInfo->pSubpasses;
9717        }
9718        delete[] pRenderPassInfo->pDependencies;
9719        delete pRenderPassInfo;
9720        delete renderPass.second;
9721    }
9722    my_data->renderPassMap.clear();
9723}
9724
9725static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9726    bool skip_call = false;
9727    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9728    const safe_VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
9729    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9730        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9731                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9732                                                                 "with a different number of attachments.");
9733    }
9734    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9735        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9736        auto image_data = getImageViewData(dev_data, image_view);
9737        assert(image_data);
9738        const VkImage &image = image_data->image;
9739        const VkImageSubresourceRange &subRange = image_data->subresourceRange;
9740        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9741                                             pRenderPassInfo->pAttachments[i].initialLayout};
9742        // TODO: Do not iterate over every possibility - consolidate where possible
9743        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9744            uint32_t level = subRange.baseMipLevel + j;
9745            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9746                uint32_t layer = subRange.baseArrayLayer + k;
9747                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9748                IMAGE_CMD_BUF_LAYOUT_NODE node;
9749                if (!FindLayout(pCB, image, sub, node)) {
9750                    SetLayout(pCB, image, sub, newNode);
9751                    continue;
9752                }
9753                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
9754                    newNode.layout != node.layout) {
9755                    skip_call |=
9756                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9757                                DRAWSTATE_INVALID_RENDERPASS, "DS",
9758                                "You cannot start a render pass using attachment %u "
9759                                "where the render pass initial layout is %s and the previous "
9760                                "known layout of the attachment is %s. The layouts must match, or "
9761                                "the render pass initial layout for the attachment must be "
9762                                "VK_IMAGE_LAYOUT_UNDEFINED",
9763                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9764                }
9765            }
9766        }
9767    }
9768    return skip_call;
9769}
9770
9771static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
9772                                          FRAMEBUFFER_NODE *pFramebuffer,
9773                                          VkAttachmentReference ref)
9774{
9775    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
9776        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
9777        SetLayout(dev_data, pCB, image_view, ref.layout);
9778    }
9779}
9780
9781static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
9782                                     const int subpass_index) {
9783    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
9784    if (!renderPass)
9785        return;
9786
9787    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
9788    if (!framebuffer)
9789        return;
9790
9791    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
9792    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9793        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
9794    }
9795    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9796        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
9797    }
9798    if (subpass.pDepthStencilAttachment) {
9799        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
9800    }
9801}
9802
9803static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9804    bool skip_call = false;
9805    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9806        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9807                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9808                             cmd_name.c_str());
9809    }
9810    return skip_call;
9811}
9812
9813static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9814    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
9815    if (!renderPass)
9816        return;
9817
9818    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
9819    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
9820    if (!framebuffer)
9821        return;
9822
9823    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9824        auto image_view = framebuffer->createInfo.pAttachments[i];
9825        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9826    }
9827}
9828
9829static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9830    bool skip_call = false;
9831    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &getFramebuffer(my_data, pRenderPassBegin->framebuffer)->createInfo;
9832    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9833        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9834        pRenderPassBegin->renderArea.offset.y < 0 ||
9835        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9836        skip_call |= static_cast<bool>(log_msg(
9837            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9838            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9839            "Cannot execute a render pass with renderArea not within the bounds of the "
9840            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9841            "height %d.",
9842            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9843            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9844    }
9845    return skip_call;
9846}
9847
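// Worked example (illustrative numbers): with a 1024x768 framebuffer, a renderArea of offset (512, 384)
// and extent 512x384 exactly fits (512 + 512 <= 1024, 384 + 384 <= 768), while an extent of 640x384 at
// the same offset fails the width test above because 512 + 640 > 1024.
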
9848// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
9849// [load|store]Op flag must be checked
9850// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
9851template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
9852    if (color_depth_op != op && stencil_op != op) {
9853        return false;
9854    }
9855    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
9856    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
9857
9858    return ((check_color_depth_load_op && (color_depth_op == op)) ||
9859            (check_stencil_load_op && (stencil_op == op)));
9860}
9861
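// Worked example (hypothetical combined depth/stencil attachment): for VK_FORMAT_D24_UNORM_S8_UINT with
// loadOp = LOAD and stencilLoadOp = CLEAR, both the depth and stencil checks apply, so
//
//     FormatSpecificLoadAndStoreOpSettings(VK_FORMAT_D24_UNORM_S8_UINT,
//                                          VK_ATTACHMENT_LOAD_OP_LOAD,     // color_depth_op
//                                          VK_ATTACHMENT_LOAD_OP_CLEAR,    // stencil_op
//                                          VK_ATTACHMENT_LOAD_OP_CLEAR);   // op -> returns true, via the stencil op
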
9862VKAPI_ATTR void VKAPI_CALL
9863CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9864    bool skip_call = false;
9865    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9866    std::unique_lock<std::mutex> lock(global_lock);
9867    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9868    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
9869    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
9870    if (pCB) {
9871        if (renderPass) {
9872            uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
9873            pCB->activeFramebuffer = pRenderPassBegin->framebuffer;
9874            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
9875                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9876                VkFormat format = renderPass->pCreateInfo->pAttachments[renderPass->attachments[i].attachment].format;
9877                if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9878                                                         renderPass->attachments[i].stencil_load_op,
9879                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
9880                    clear_op_size = static_cast<uint32_t>(i) + 1;
9881                    std::function<bool()> function = [=]() {
9882                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
9883                        return false;
9884                    };
9885                    pCB->validate_functions.push_back(function);
9886                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9887                                                                renderPass->attachments[i].stencil_load_op,
9888                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
9889                    std::function<bool()> function = [=]() {
9890                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
9891                        return false;
9892                    };
9893                    pCB->validate_functions.push_back(function);
9894                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9895                                                                renderPass->attachments[i].stencil_load_op,
9896                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
9897                    std::function<bool()> function = [=]() {
9898                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
9899                                                          "vkCmdBeginRenderPass()");
9900                    };
9901                    pCB->validate_functions.push_back(function);
9902                }
9903                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
9904                    std::function<bool()> function = [=]() {
9905                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
9906                                                          "vkCmdBeginRenderPass()");
9907                    };
9908                    pCB->validate_functions.push_back(function);
9909                }
9910            }
9911            if (clear_op_size > pRenderPassBegin->clearValueCount) {
9912                skip_call |=
9913                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9914                            reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9915                            "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
9916                            "be at least %u "
9917                            "entries in the pClearValues array to account for the highest-indexed attachment in renderPass 0x%" PRIx64
9918                            " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which requires a clearValueCount of at least %u. Note that the "
9919                            "pClearValues array is indexed by attachment number, so even if some pClearValues entries between 0 and %u "
9920                            "correspond to attachments that aren't cleared, they will be ignored.",
9921                            pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass),
9922                            clear_op_size, clear_op_size - 1);
9923            }
9924            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9925            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, pCB, pRenderPassBegin);
9926            skip_call |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9927            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
9928            skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9929            skip_call |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9930            pCB->activeRenderPass = renderPass;
9931            // This is a shallow copy as that is all that is needed for now
9932            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9933            pCB->activeSubpass = 0;
9934            pCB->activeSubpassContents = contents;
9935            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
9936            // Connect this framebuffer to this cmdBuffer
9937            framebuffer->cb_bindings.insert(pCB);
9938
9939            // transition attachments to the correct layouts for the first subpass
9940            TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9941        } else {
9942            skip_call |=
9943                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9944                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9945        }
9946    }
9947    lock.unlock();
9948    if (!skip_call) {
9949        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9950    }
9951}
9952
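// Validate and record vkCmdNextSubpass(): require a primary command buffer and an active render pass,
// record CMD_NEXTSUBPASS, then advance the active subpass and transition attachment layouts for it.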
9953VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9954    bool skip_call = false;
9955    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9956    std::unique_lock<std::mutex> lock(global_lock);
9957    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9958    if (pCB) {
9959        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9960        skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9961        pCB->activeSubpass++;
9962        pCB->activeSubpassContents = contents;
9963        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9964        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9965    }
9966    lock.unlock();
9967    if (!skip_call)
9968        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9969}
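// Validate and record vkCmdEndRenderPass(): queue deferred validate_functions that mark each attachment's
// memory contents valid (STORE_OP_STORE) or invalid (STORE_OP_DONT_CARE) at submit time, verify we are
// inside an active render pass on a primary command buffer, transition attachments to their final
// layouts, and clear the active render pass state.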
9970
9971VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9972    bool skip_call = false;
9973    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9974    std::unique_lock<std::mutex> lock(global_lock);
9975    auto pCB = getCBNode(dev_data, commandBuffer);
9976    if (pCB) {
9977        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
9978        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
9979        if (pRPNode) {
9980            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9981                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9982                VkFormat format = pRPNode->pCreateInfo->pAttachments[pRPNode->attachments[i].attachment].format;
9983                if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9984                                                         pRPNode->attachments[i].stencil_store_op, VK_ATTACHMENT_STORE_OP_STORE)) {
9985                    std::function<bool()> function = [=]() {
9986                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
9987                        return false;
9988                    };
9989                    pCB->validate_functions.push_back(function);
9990                } else if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9991                                                                pRPNode->attachments[i].stencil_store_op,
9992                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
9993                    std::function<bool()> function = [=]() {
9994                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
9995                        return false;
9996                    };
9997                    pCB->validate_functions.push_back(function);
9998                }
9999            }
10000        }
10001        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
10002        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
10003        skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
10004        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
10005        pCB->activeRenderPass = nullptr;
10006        pCB->activeSubpass = 0;
10007        pCB->activeFramebuffer = VK_NULL_HANDLE;
10008    }
10009    lock.unlock();
10010    if (!skip_call)
10011        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
10012}
10013
10014static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
10015                                        uint32_t secondaryAttach, const char *msg) {
10016    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10017                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10018                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
10019                   "that is not compatible with the Primary Cmd Buffer's current render pass. "
10020                   "Attachment %u is not compatible with %u: %s",
10021                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
10022}
10023
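// Check that a pair of corresponding attachment references from primary and secondary render passes are
// compatible: out-of-range indices are treated as VK_ATTACHMENT_UNUSED, both-unused is compatible, and
// otherwise format and sample count must match (and flags too, for multi-subpass render passes).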
10024static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10025                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
10026                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
10027                                            uint32_t secondaryAttach, bool is_multi) {
10028    bool skip_call = false;
10029    if (primaryPassCI->attachmentCount <= primaryAttach) {
10030        primaryAttach = VK_ATTACHMENT_UNUSED;
10031    }
10032    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
10033        secondaryAttach = VK_ATTACHMENT_UNUSED;
10034    }
10035    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
10036        return skip_call;
10037    }
10038    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
10039        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10040                                                 "The first is unused while the second is not.");
10041        return skip_call;
10042    }
10043    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
10044        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10045                                                 "The second is unused while the first is not.");
10046        return skip_call;
10047    }
10048    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
10049        skip_call |=
10050            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
10051    }
10052    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
10053        skip_call |=
10054            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
10055    }
10056    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
10057        skip_call |=
10058            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
10059    }
10060    return skip_call;
10061}
10062
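// Compare a single subpass from two render pass CreateInfos: walk the input, color, resolve, and
// depth/stencil attachment references pairwise, padding the shorter arrays with VK_ATTACHMENT_UNUSED.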
10063static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10064                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10065                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
10066    bool skip_call = false;
10067    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
10068    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
10069    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
10070    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
10071        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
10072        if (i < primary_desc.inputAttachmentCount) {
10073            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
10074        }
10075        if (i < secondary_desc.inputAttachmentCount) {
10076            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
10077        }
10078        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
10079                                                     secondaryPassCI, secondary_input_attach, is_multi);
10080    }
10081    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
10082    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
10083        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
10084        if (i < primary_desc.colorAttachmentCount) {
10085            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
10086        }
10087        if (i < secondary_desc.colorAttachmentCount) {
10088            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
10089        }
10090        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
10091                                                     secondaryPassCI, secondary_color_attach, is_multi);
10092        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
10093        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
10094            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
10095        }
10096        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
10097            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
10098        }
10099        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
10100                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
10101    }
10102    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
10103    if (primary_desc.pDepthStencilAttachment) {
10104        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
10105    }
10106    if (secondary_desc.pDepthStencilAttachment) {
10107        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
10108    }
10109    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
10110                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
10111    return skip_call;
10112}
10113
10114// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
10115//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
10116//  will then feed into this function
10117static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10118                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10119                                            VkRenderPassCreateInfo const *secondaryPassCI) {
10120    bool skip_call = false;
10121
10122    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
10123        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10124                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10125                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10126                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
10127                             " that has a subpassCount of %u.",
10128                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
10129                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
10130    } else {
10131        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
10132            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
10133                                                      primaryPassCI->subpassCount > 1);
10134        }
10135    }
10136    return skip_call;
10137}
10138
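// If the secondary command buffer inherits a framebuffer, it must match the primary's active framebuffer
// and, when the inherited render pass differs from the framebuffer's, the two must be compatible.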
10139static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
10140                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
10141    bool skip_call = false;
10142    if (!pSubCB->beginInfo.pInheritanceInfo) {
10143        return skip_call;
10144    }
10145    VkFramebuffer primary_fb = pCB->activeFramebuffer;
10146    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10147    if (secondary_fb != VK_NULL_HANDLE) {
10148        if (primary_fb != secondary_fb) {
10149            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10150                                 DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10151                                 "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10152                                 " which has a framebuffer 0x%" PRIx64
10153                                 " that is not the same as the primaryCB's current active framebuffer 0x%" PRIx64 ".",
10154                                 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
10155                                 reinterpret_cast<uint64_t &>(primary_fb));
10156        }
10157        auto fb = getFramebuffer(dev_data, secondary_fb);
10158        if (!fb) {
10159            skip_call |=
10160                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10161                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10162                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
10163                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10164            return skip_call;
10165        }
10166        auto cb_renderpass = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10167        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
10168            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
10169                                                         cb_renderpass->pCreateInfo);
10170        }
10171    }
10172    return skip_call;
10173}
10174
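// Check secondary command buffer state against the primary that executes it: inherited pipeline
// statistics must be a subset of any active statistics query pool's, the secondary must not have started
// a query of a type already active in the primary, and both command buffers must come from pools with
// the same queue family.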
10175static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10176    bool skip_call = false;
10177    unordered_set<int> activeTypes;
10178    for (auto queryObject : pCB->activeQueries) {
10179        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10180        if (queryPoolData != dev_data->queryPoolMap.end()) {
10181            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10182                pSubCB->beginInfo.pInheritanceInfo) {
10183                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10184                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10185                    skip_call |= log_msg(
10186                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10187                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10188                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10189                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics is being queried so the command "
10190                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried so the secondary "
10191                        "command buffer's inherited pipelineStatistics must be a subset of the query pool's pipelineStatistics.",
10192                }
10193            }
10194            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10195        }
10196    }
10197    for (auto queryObject : pSubCB->startedQueries) {
10198        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10199        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10200            skip_call |=
10201                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10202                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10203                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10204                        "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
10205                        "secondary Cmd Buffer 0x%p.",
10206                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10207                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10208        }
10209    }
10210
10211    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
10212    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
10213    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
10214        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10215                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
10216                             "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIxLEAST64
10217                             " created in queue family %d has secondary command buffer 0x%" PRIxLEAST64 " created in queue family %d.",
10218                             reinterpret_cast<uint64_t>(pCB->commandBuffer), primary_pool->queueFamilyIndex,
10219                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), secondary_pool->queueFamilyIndex);
10220    }
10221
10222    return skip_call;
10223}
10224
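// Validate vkCmdExecuteCommands(): every element of pCommandBuffers must be a known secondary command
// buffer; when executed inside a render pass the secondary needs RENDER_PASS_CONTINUE_BIT and a
// compatible render pass/framebuffer; simultaneous-use and inherited-query rules are enforced; and each
// secondary is linked to the primary and marked in-flight.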
10225VKAPI_ATTR void VKAPI_CALL
10226CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10227    bool skip_call = false;
10228    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10229    std::unique_lock<std::mutex> lock(global_lock);
10230    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10231    if (pCB) {
10232        GLOBAL_CB_NODE *pSubCB = NULL;
10233        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10234            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10235            if (!pSubCB) {
10236                skip_call |=
10237                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10238                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10239                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
10240                            (void *)pCommandBuffers[i], i);
                // Skip the remaining checks for this element; they would dereference the null pSubCB
                continue;
10241            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10242                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10243                                     __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10244                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
10245                                     "array. All cmd buffers in pCommandBuffers array must be secondary.",
10246                                     (void *)pCommandBuffers[i], i);
10247            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10248                auto secondary_rp_node = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10249                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10250                    skip_call |= log_msg(
10251                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10252                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10253                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
10254                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10255                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
10256                } else {
10257                    // Make sure render pass is compatible with parent command buffer pass if has continue
10258                    if (pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) {
10259                        skip_call |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->pCreateInfo,
10260                                                                    pCommandBuffers[i], secondary_rp_node->pCreateInfo);
10261                    }
10262                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
10263                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10264                }
10265                string errorString = "";
10266                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
10267                if ((pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) &&
10268                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->pCreateInfo, secondary_rp_node->pCreateInfo,
10269                                                     errorString)) {
10270                    skip_call |= log_msg(
10271                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10272                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10273                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
10274                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
10275                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10276                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
10277                }
10278            }
10279            // TODO(mlentine): Move more logic into this method
10280            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10281            skip_call |= validateCommandBufferState(dev_data, pSubCB);
10282            // Secondary cmdBuffers are considered pending execution from the moment
10283            // they are recorded
10284            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10285                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10286                    skip_call |= log_msg(
10287                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10288                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10289                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10290                        "set!",
10291                        (uint64_t)(pCB->commandBuffer));
10292                }
10293                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10294                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
10295                    skip_call |= log_msg(
10296                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10297                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10298                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
10299                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10300                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10301                        "set, even though it does.",
10302                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10303                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10304                }
10305            }
10306            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
10307                skip_call |=
10308                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10309                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10310                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10311                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
10312                            "flight when inherited queries are not "
10313                            "supported on this device.",
10314                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10315            }
10316            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10317            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10318            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10319            for (auto &function : pSubCB->queryUpdates) {
10320                pCB->queryUpdates.push_back(function);
10321            }
10322        }
10323        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
10324        skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10325    }
10326    lock.unlock();
10327    if (!skip_call)
10328        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10329}
10330
10331// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
10332static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
10333                                    VkDeviceSize end_offset) {
10334    bool skip_call = false;
10335    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10336    // Iterate over all bound image ranges and verify that for any that overlap the
10337    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
10338    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
10339    for (auto image_handle : mem_info->bound_images) {
10340        auto img_it = mem_info->bound_ranges.find(image_handle);
10341        if (img_it != mem_info->bound_ranges.end()) {
10342            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
10343                std::vector<VkImageLayout> layouts;
10344                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
10345                    for (auto layout : layouts) {
10346                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10347                            skip_call |=
10348                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10349                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10350                                                                                        "GENERAL or PREINITIALIZED are supported.",
10351                                        string_VkImageLayout(layout));
10352                        }
10353                    }
10354                }
10355            }
10356        }
10357    }
10358    return skip_call;
10359}
10360
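// vkMapMemory: mark the object's contents valid, verify any images bound over the mapped range are in
// GENERAL or PREINITIALIZED layout, require a HOST_VISIBLE memory type, and validate the map range.
// On success the range is recorded and shadow-copy tracking is set up for non-coherent memory.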
10361VKAPI_ATTR VkResult VKAPI_CALL
10362MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10363    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10364
10365    bool skip_call = false;
10366    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10367    std::unique_lock<std::mutex> lock(global_lock);
10368    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
10369    if (mem_info) {
10370        // TODO : This could be more fine-grained to track just the region that is valid
10371        mem_info->global_valid = true;
10372        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
10373        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
10374        // TODO : Do we need to create new "bound_range" for the mapped range?
10375        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
10376        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
10377             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10378            skip_call |=
10379                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10380                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10381                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
10382        }
10383    }
10384    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
10385    lock.unlock();
10386
10387    if (!skip_call) {
10388        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
10389        if (VK_SUCCESS == result) {
10390            lock.lock();
10391            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
10392            storeMemRanges(dev_data, mem, offset, size);
10393            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
10394            lock.unlock();
10395        }
10396    }
10397    return result;
10398}
10399
10400VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
10401    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10402    bool skip_call = false;
10403
10404    std::unique_lock<std::mutex> lock(global_lock);
10405    skip_call |= deleteMemRanges(my_data, mem);
10406    lock.unlock();
10407    if (!skip_call) {
10408        my_data->device_dispatch_table->UnmapMemory(device, mem);
10409    }
10410}
10411
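// Verify that every flush/invalidate range lies within the currently mapped range of its memory object.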
10412static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10413                                   const VkMappedMemoryRange *pMemRanges) {
10414    bool skip_call = false;
10415    for (uint32_t i = 0; i < memRangeCount; ++i) {
10416        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
10417        if (mem_info) {
10418            if (mem_info->mem_range.offset > pMemRanges[i].offset) {
10419                skip_call |=
10420                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10421                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10422                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10423                            "(" PRINTF_SIZE_T_SPECIFIER ").",
10424                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
10425            }
10426
10427            const uint64_t my_dataTerminus = (mem_info->mem_range.size == VK_WHOLE_SIZE)
10428                                                 ? mem_info->alloc_info.allocationSize
10429                                                 : (mem_info->mem_range.offset + mem_info->mem_range.size);
10430            if (pMemRanges[i].size != VK_WHOLE_SIZE && (my_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
10431                skip_call |= log_msg(
10432                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10433                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10434                    "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER ") exceeds the Memory Object's upper-bound "
10435                    "(" PRINTF_SIZE_T_SPECIFIER ").",
10436                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), static_cast<size_t>(my_dataTerminus));
10437            }
10438        }
10439    }
10440    return skip_call;
10441}
10442
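// For non-coherent memory tracked through a shadow copy, check the fill-pattern guard bands on both
// sides of the user's data for underflow/overflow, then copy the shadow data to the driver mapping.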
10443static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10444                                                     const VkMappedMemoryRange *pMemRanges) {
10445    bool skip_call = false;
10446    for (uint32_t i = 0; i < memRangeCount; ++i) {
10447        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
10448        if (mem_info) {
10449            if (mem_info->shadow_copy) {
10450                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10451                                        ? mem_info->mem_range.size
10452                                        : (mem_info->alloc_info.allocationSize - pMemRanges[i].offset);
10453                char *data = static_cast<char *>(mem_info->shadow_copy);
10454                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
10455                    if (data[j] != NoncoherentMemoryFillValue) {
10456                        skip_call |= log_msg(
10457                            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10458                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10459                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10460                    }
10461                }
10462                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
10463                    if (data[j] != NoncoherentMemoryFillValue) {
10464                        skip_call |= log_msg(
10465                            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10466                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10467                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10468                    }
10469                }
10470                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
10471            }
10472        }
10473    }
10474    return skip_call;
10475}
10476
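// Refresh the shadow copy with the driver's current data after an invalidate.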
10477static void CopyNoncoherentMemoryFromDriver(layer_data *my_data, uint32_t memory_range_count,
10478                                            const VkMappedMemoryRange *mem_ranges) {
10479    for (uint32_t i = 0; i < memory_range_count; ++i) {
10480        auto mem_info = getMemObjInfo(my_data, mem_ranges[i].memory);
10481        if (mem_info && mem_info->shadow_copy) {
10482            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10483                                    ? mem_info->mem_range.size
10484                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
10485            char *data = static_cast<char *>(mem_info->shadow_copy);
10486            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
10487        }
10488    }
10489}
10490
10491VkResult VKAPI_CALL
10492FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10493    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10494    bool skip_call = false;
10495    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10496
10497    std::unique_lock<std::mutex> lock(global_lock);
10498    skip_call |= ValidateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
10499    skip_call |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10500    lock.unlock();
10501    if (!skip_call) {
10502        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10503    }
10504    return result;
10505}
10506
10507VkResult VKAPI_CALL
10508InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10509    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10510    bool skip_call = false;
10511    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10512
10513    std::unique_lock<std::mutex> lock(global_lock);
10514    skip_call |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10515    lock.unlock();
10516    if (!skip_call) {
10517        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10518        // Update our shadow copy with modified driver data
10519        CopyNoncoherentMemoryFromDriver(my_data, memRangeCount, pMemRanges);
10520    }
10521    return result;
10522}
10523
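// vkBindImageMemory: record the image-to-memory binding, insert and validate the image's memory range
// (flagging linear-tiled images), check memory type compatibility, then update the image node on success.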
10524VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10525    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10526    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10527    bool skip_call = false;
10528    std::unique_lock<std::mutex> lock(global_lock);
10529    auto image_node = getImageNode(dev_data, image);
10530    if (image_node) {
10531        // Track objects tied to memory
10532        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
10533        skip_call = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10534        VkMemoryRequirements memRequirements;
10535        lock.unlock();
10536        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10537        lock.lock();
10538
10539        // Track and validate bound memory range information
10540        auto mem_info = getMemObjInfo(dev_data, mem);
10541        if (mem_info) {
10542            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, memRequirements,
10543                                                image_node->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
10544            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
10545        }
10546
10547        print_mem_list(dev_data);
10548        lock.unlock();
10549        if (!skip_call) {
10550            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
10551            lock.lock();
10552            image_node->mem = mem;
10553            image_node->memOffset = memoryOffset;
10554            image_node->memSize = memRequirements.size;
10555            lock.unlock();
10556        }
10557    } else {
10558        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10559                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
10560                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been deleted?",
10561                reinterpret_cast<const uint64_t &>(image));
10562    }
10563    return result;
10564}
10565
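// vkSetEvent: a host signal is visible to all queues immediately, so flag an error if the event is still
// in use by a recorded command buffer and fold VK_PIPELINE_STAGE_HOST_BIT into each queue's stage mask.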
10566VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
10567    bool skip_call = false;
10568    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10569    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10570    std::unique_lock<std::mutex> lock(global_lock);
10571    auto event_node = getEventNode(dev_data, event);
10572    if (event_node) {
10573        event_node->needsSignaled = false;
10574        event_node->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10575        if (event_node->write_in_use) {
10576            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10577                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10578                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
10579                                 reinterpret_cast<const uint64_t &>(event));
10580        }
10581    }
10582    lock.unlock();
10583    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
10584    // TODO : For correctness this needs a separate fix to verify that the app doesn't make incorrect assumptions about the
10585    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10586    for (auto queue_data : dev_data->queueMap) {
10587        auto event_entry = queue_data.second.eventToStageMap.find(event);
10588        if (event_entry != queue_data.second.eventToStageMap.end()) {
10589            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10590        }
10591    }
10592    if (!skip_call)
10593        result = dev_data->device_dispatch_table->SetEvent(device, event);
10594    return result;
10595}
10596
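// vkQueueBindSparse: validate the fence, record sparse buffer/image memory bindings, track wait/signal
// semaphore state (flagging waits that can never be signaled and redundant signals), and enqueue one
// submission entry per bind info, attaching the fence to the last one.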
10597VKAPI_ATTR VkResult VKAPI_CALL
10598QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10599    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10600    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10601    bool skip_call = false;
10602    std::unique_lock<std::mutex> lock(global_lock);
10603    auto pFence = getFenceNode(dev_data, fence);
10604    auto pQueue = getQueueNode(dev_data, queue);
10605
10606    // First verify that fence is not in use
10607    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
10608
10609    if (pFence) {
10610        SubmitFence(pQueue, pFence, bindInfoCount);
10611    }
10612
10613    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10614        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10615        // Track objects tied to memory
10616        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
10617            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
10618                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
10619                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10620                                           "vkQueueBindSparse"))
10621                    skip_call = true;
10622            }
10623        }
10624        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
10625            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
10626                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
10627                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10628                                           "vkQueueBindSparse"))
10629                    skip_call = true;
10630            }
10631        }
10632        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10633            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10634                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
10635                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10636                                           "vkQueueBindSparse"))
10637                    skip_call = true;
10638            }
10639        }
10640
10641        std::vector<SEMAPHORE_WAIT> semaphore_waits;
10642        std::vector<VkSemaphore> semaphore_signals;
10643        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10644            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
10645            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10646            if (pSemaphore) {
10647                if (pSemaphore->signaled) {
10648                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
10649                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
10650                        pSemaphore->in_use.fetch_add(1);
10651                    }
10652                    pSemaphore->signaler.first = VK_NULL_HANDLE;
10653                    pSemaphore->signaled = false;
10654                } else {
10655                    skip_call |=
10656                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10657                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10658                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
10659                                " that has no way to be signaled.",
10660                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10661                }
10662            }
10663        }
10664        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10665            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
10666            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10667            if (pSemaphore) {
10668                if (pSemaphore->signaled) {
10669                    skip_call |=
10670                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10671                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10672                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
10673                                ", but that semaphore is already signaled.",
10674                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10675                } else {
10677                    pSemaphore->signaler.first = queue;
10678                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
10679                    pSemaphore->signaled = true;
10680                    pSemaphore->in_use.fetch_add(1);
10681                    semaphore_signals.push_back(semaphore);
10682                }
10683            }
10684        }
10685
10686        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
10687                                         semaphore_waits,
10688                                         semaphore_signals,
10689                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
10690    }
10691
10692    if (pFence && !bindInfoCount) {
10693        // No work to do, just dropping a fence in the queue by itself.
10694        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
10695                                         std::vector<SEMAPHORE_WAIT>(),
10696                                         std::vector<VkSemaphore>(),
10697                                         fence);
10698    }
10699
10700    print_mem_list(dev_data);
10701    lock.unlock();
10702
10703    if (!skip_call)
10704        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10705
10706    return result;
10707}
10708
10709VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10710                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10711    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10712    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10713    if (result == VK_SUCCESS) {
10714        std::lock_guard<std::mutex> lock(global_lock);
10715        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10716        sNode->signaler.first = VK_NULL_HANDLE;
10717        sNode->signaler.second = 0;
10718        sNode->signaled = false;
10719        sNode->in_use.store(0);
10720    }
10721    return result;
10722}
10723
10724VKAPI_ATTR VkResult VKAPI_CALL
10725CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10726    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10727    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10728    if (result == VK_SUCCESS) {
10729        std::lock_guard<std::mutex> lock(global_lock);
10730        dev_data->eventMap[*pEvent].needsSignaled = false;
10731        dev_data->eventMap[*pEvent].in_use.store(0);
10732        dev_data->eventMap[*pEvent].write_in_use = 0;
10733        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10734    }
10735    return result;
10736}
10737
10738VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10739                                                  const VkAllocationCallbacks *pAllocator,
10740                                                  VkSwapchainKHR *pSwapchain) {
10741    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10742    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10743
10744    if (VK_SUCCESS == result) {
10745        std::lock_guard<std::mutex> lock(global_lock);
10746        dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
10747    }
10748
10749    return result;
10750}
10751
10752VKAPI_ATTR void VKAPI_CALL
10753DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10754    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10755    bool skip_call = false;
10756
10757    std::unique_lock<std::mutex> lock(global_lock);
10758    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
10759    if (swapchain_data) {
10760        if (swapchain_data->images.size() > 0) {
10761            for (auto swapchain_image : swapchain_data->images) {
10762                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10763                if (image_sub != dev_data->imageSubresourceMap.end()) {
10764                    for (auto imgsubpair : image_sub->second) {
10765                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10766                        if (image_item != dev_data->imageLayoutMap.end()) {
10767                            dev_data->imageLayoutMap.erase(image_item);
10768                        }
10769                    }
10770                    dev_data->imageSubresourceMap.erase(image_sub);
10771                }
10772                skip_call |=
10773                    clear_object_binding(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10774                dev_data->imageMap.erase(swapchain_image);
10775            }
10776        }
10777        dev_data->device_extensions.swapchainMap.erase(swapchain);
10778    }
10779    lock.unlock();
10780    if (!skip_call)
10781        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10782}
10783
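// vkGetSwapchainImagesKHR: cache the returned images on the swapchain node, warn if a repeat query
// returns different data, and create IMAGE_NODE/layout tracking entries for each swapchain image.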
10784VKAPI_ATTR VkResult VKAPI_CALL
10785GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10786    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10787    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10788
10789    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10790        // This should never happen and is checked by param checker.
10791        if (!pCount)
10792            return result;
10793        std::lock_guard<std::mutex> lock(global_lock);
10794        const size_t count = *pCount;
10795        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
        // Guard against an unknown swapchain handle; the tracking loop below dereferences swapchain_node
        if (!swapchain_node)
            return result;
10796        if (!swapchain_node->images.empty()) {
10797            // TODO : Not sure I like the memcmp here, but it works
10798            const bool mismatch = (swapchain_node->images.size() != count ||
10799                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10800            if (mismatch) {
10801                // TODO: Verify against Valid Usage section of extension
10802                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10803                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10804                        "vkGetSwapchainImagesKHR(0x%" PRIx64
10805                        ") returned mismatching image data on a repeat query",
10806                        (uint64_t)(swapchain));
10807            }
10808        }
10809        for (uint32_t i = 0; i < *pCount; ++i) {
10810            IMAGE_LAYOUT_NODE image_layout_node;
10811            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10812            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10813            // Add imageMap entries for each swapchain image
10814            VkImageCreateInfo image_ci = {};
10815            image_ci.mipLevels = 1;
10816            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10817            image_ci.usage = swapchain_node->createInfo.imageUsage;
10818            image_ci.format = swapchain_node->createInfo.imageFormat;
10819            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
10820            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
10821            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
10822            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
10823            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pSwapchainImages[i], &image_ci));
10824            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
10825            image_node->valid = false;
10826            image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10827            swapchain_node->images.push_back(pSwapchainImages[i]);
10828            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10829            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10830            dev_data->imageLayoutMap[subpair] = image_layout_node;
10831            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10832        }
10833    }
10834    return result;
10835}
10836
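// Illustrative sketch (editorial addition, not part of the layer): the count-then-data
// idiom an application is expected to follow when retrieving swapchain images, which is
// what populates the bookkeeping above. Variable names here are assumptions.
//
//     uint32_t imageCount = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &imageCount, nullptr);        // first call: query count
//     std::vector<VkImage> images(imageCount);
//     vkGetSwapchainImagesKHR(device, swapchain, &imageCount, images.data());  // second call: fetch handles
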
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;

    std::lock_guard<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
        if (pSemaphore && !pSemaphore->signaled) {
            skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
        if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
            VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
            skip_call |= ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, image), "vkQueuePresentKHR()");
            vector<VkImageLayout> layouts;
            if (FindLayouts(dev_data, image, layouts)) {
                for (auto layout : layouts) {
                    if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                        skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Image being presented must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s.",
                                        string_VkImageLayout(layout));
                    }
                }
            }
        }
    }

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);

    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // Semaphore waits occur before error generation, if the call reached
        // the ICD. (Confirm?)
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
            if (pSemaphore) {
                pSemaphore->signaler.first = VK_NULL_HANDLE;
                pSemaphore->signaled = false;
            }
        }

        // Note: even though presentation is directed to a queue, there is no
        // direct ordering between QP and subsequent work, so QP (and its
        // semaphore waits) /never/ participate in any completion proof.
    }

    return result;
}

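// Illustrative sketch (editorial addition, not part of the layer): how an application
// satisfies the layout check in QueuePresentKHR above by transitioning the swapchain
// image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR before presenting. cmdBuf and swapchainImage
// are assumed to exist in the application.
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = swapchainImage;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
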
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
    if (pSemaphore && pSemaphore->signaled) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
    }

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);

    lock.lock();
    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
        if (pFence) {
            pFence->state = FENCE_INFLIGHT;
            pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
        }

        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
        if (pSemaphore) {
            pSemaphore->signaled = true;
            pSemaphore->signaler.first = VK_NULL_HANDLE;
        }
    }
    lock.unlock();

    return result;
}

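// Illustrative sketch (editorial addition, not part of the layer): correct semaphore use
// around vkAcquireNextImageKHR. The acquire signals the semaphore; a subsequent queue
// submission waits on (and thereby unsignals) it, so the next acquire may legally reuse
// it. acquireSem, cmdBuf, and queue are assumed to exist in the application.
//
//     uint32_t imageIndex = 0;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquireSem, VK_NULL_HANDLE, &imageIndex);
//     VkSubmitInfo submit = {};
//     submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     submit.waitSemaphoreCount = 1;
//     submit.pWaitSemaphores = &acquireSem;   // consumes the acquire signal
//     submit.pWaitDstStageMask = &waitStage;
//     submit.commandBufferCount = 1;
//     submit.pCommandBuffers = &cmdBuf;
//     vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);
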
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    bool skip_call = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    if (my_data->instance_state) {
        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
        if (NULL == pPhysicalDevices) {
            my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
        } else {
            if (UNCALLED == my_data->instance_state->vkEnumeratePhysicalDevicesState) {
                // Flag warning here. You can call this without having queried the count, but it may not be
                // robust on platforms with multiple physical devices.
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
                                    0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                                    "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
                                    "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
            }
            // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
            else if (my_data->instance_state->physical_devices_count != *pPhysicalDeviceCount) {
                // Having actual count match count from app is not a requirement, so this can be a warning
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                    "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
                                    "supported by this instance is %u.",
                                    *pPhysicalDeviceCount, my_data->instance_state->physical_devices_count);
            }
            my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
        }
        if (skip_call) {
            return VK_ERROR_VALIDATION_FAILED_EXT;
        }
        VkResult result =
            my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
        if (NULL == pPhysicalDevices) {
            my_data->instance_state->physical_devices_count = *pPhysicalDeviceCount;
        } else { // Save physical devices
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(pPhysicalDevices[i]), layer_data_map);
                phy_dev_data->physical_device_state = unique_ptr<PHYSICAL_DEVICE_STATE>(new PHYSICAL_DEVICE_STATE());
                // Init actual features for each physical device
                my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i],
                                                                            &phy_dev_data->physical_device_features);
            }
        }
        return result;
    } else {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
                DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
                (uint64_t)instance);
    }
    return VK_ERROR_VALIDATION_FAILED_EXT;
}

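// Illustrative sketch (editorial addition, not part of the layer): the count-then-data
// call sequence the warnings above expect from an application. Names are assumptions.
//
//     uint32_t gpuCount = 0;
//     vkEnumeratePhysicalDevices(instance, &gpuCount, nullptr);      // drives QUERY_COUNT
//     std::vector<VkPhysicalDevice> gpus(gpuCount);
//     vkEnumeratePhysicalDevices(instance, &gpuCount, gpus.data());  // drives QUERY_DETAILS
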
VKAPI_ATTR void VKAPI_CALL
GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                       VkQueueFamilyProperties *pQueueFamilyProperties) {
    bool skip_call = false;
    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    if (phy_dev_data->physical_device_state) {
        if (NULL == pQueueFamilyProperties) {
            phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        } else {
            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
            // get count
            if (UNCALLED == phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
                skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                                     "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
                                     "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
                                     "NULL pQueueFamilyProperties to query pCount.");
            }
            // Then verify that pCount that is passed in on second call matches what was returned
            if (phy_dev_data->physical_device_state->queueFamilyPropertiesCount != *pCount) {
                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
                // provide as warning
                skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                     "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
                                     "supported by this physicalDevice is %u.",
                                     *pCount, phy_dev_data->physical_device_state->queueFamilyPropertiesCount);
            }
            phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
        }
        if (skip_call) {
            return;
        }
        phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount,
                                                                                      pQueueFamilyProperties);
        if (NULL == pQueueFamilyProperties) {
            phy_dev_data->physical_device_state->queueFamilyPropertiesCount = *pCount;
        } else { // Save queue family properties
            phy_dev_data->queue_family_properties.reserve(*pCount);
            for (uint32_t i = 0; i < *pCount; i++) {
                phy_dev_data->queue_family_properties.emplace_back(new VkQueueFamilyProperties(pQueueFamilyProperties[i]));
            }
        }
        return;
    } else {
        log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
                "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
                (uint64_t)physicalDevice);
    }
}

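// Illustrative sketch (editorial addition, not part of the layer): the same
// count-then-data idiom for queue family queries. Names are assumptions.
//
//     uint32_t familyCount = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(physDev, &familyCount, nullptr);
//     std::vector<VkQueueFamilyProperties> families(familyCount);
//     vkGetPhysicalDeviceQueueFamilyProperties(physDev, &familyCount, families.data());
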
VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
                                                         VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                  const char *pLayerName, uint32_t *pCount,
                                                                  VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

static PFN_vkVoidFunction
intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction
intercept_core_device_command(const char *name);

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

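// Illustrative sketch (editorial addition, not part of the layer): how an application
// retrieves a device-level entry point, which the interception chain above services
// before falling through to the next layer. Names are assumptions.
//
//     auto pfnQueuePresent = reinterpret_cast<PFN_vkQueuePresentKHR>(
//         vkGetDeviceProcAddr(device, "vkQueuePresentKHR"));
//     if (pfnQueuePresent)
//         pfnQueuePresent(queue, &presentInfo);
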
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (proc)
        return proc;

    assert(instance);

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (proc)
        return proc;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}

static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };

    if (dev) {
        layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0: thin wrappers, since this library contains only one layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}